// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2017-2018 Christoph Hellwig.
 */

#include <linux/backing-dev.h>
#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <trace/events/block.h>
#include "nvme.h"

bool multipath = true;
module_param(multipath, bool, 0444);
MODULE_PARM_DESC(multipath,
	"turn on native support for multiple controllers per subsystem");

static const char *nvme_iopolicy_names[] = {
	[NVME_IOPOLICY_NUMA]	= "numa",
	[NVME_IOPOLICY_RR]	= "round-robin",
};

static int iopolicy = NVME_IOPOLICY_NUMA;

static int nvme_set_iopolicy(const char *val, const struct kernel_param *kp)
{
	if (!val)
		return -EINVAL;
	if (!strncmp(val, "numa", 4))
		iopolicy = NVME_IOPOLICY_NUMA;
	else if (!strncmp(val, "round-robin", 11))
		iopolicy = NVME_IOPOLICY_RR;
	else
		return -EINVAL;

	return 0;
}

static int nvme_get_iopolicy(char *buf, const struct kernel_param *kp)
{
	return sprintf(buf, "%s\n", nvme_iopolicy_names[iopolicy]);
}

module_param_call(iopolicy, nvme_set_iopolicy, nvme_get_iopolicy,
	&iopolicy, 0644);
MODULE_PARM_DESC(iopolicy,
	"Default multipath I/O policy; 'numa' (default) or 'round-robin'");

void nvme_mpath_default_iopolicy(struct nvme_subsystem *subsys)
{
	subsys->iopolicy = iopolicy;
}

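/*
 * Queue freeze handling for all namespace head disks in a subsystem.  The
 * start/wait/unfreeze helpers mirror the block layer freeze API and must be
 * called with subsys->lock held.
 */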
void nvme_mpath_unfreeze(struct nvme_subsystem *subsys)
{
	struct nvme_ns_head *h;

	lockdep_assert_held(&subsys->lock);
	list_for_each_entry(h, &subsys->nsheads, entry)
		if (h->disk)
			blk_mq_unfreeze_queue(h->disk->queue);
}

void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys)
{
	struct nvme_ns_head *h;

	lockdep_assert_held(&subsys->lock);
	list_for_each_entry(h, &subsys->nsheads, entry)
		if (h->disk)
			blk_mq_freeze_queue_wait(h->disk->queue);
}

void nvme_mpath_start_freeze(struct nvme_subsystem *subsys)
{
	struct nvme_ns_head *h;

	lockdep_assert_held(&subsys->lock);
	list_for_each_entry(h, &subsys->nsheads, entry)
		if (h->disk)
			blk_freeze_queue_start(h->disk->queue);
}

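/*
 * Fail over a multipath request: clear the cached current path, kick off an
 * ANA log re-read for ANA errors, and steal all bios of the request back
 * onto the head's requeue list so they can be resubmitted on another path.
 */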
void nvme_failover_req(struct request *req)
{
	struct nvme_ns *ns = req->q->queuedata;
	u16 status = nvme_req(req)->status & 0x7ff;
	unsigned long flags;
	struct bio *bio;

	nvme_mpath_clear_current_path(ns);

	/*
	 * If we got back an ANA error, we know the controller is alive but not
	 * ready to serve this namespace.  Kick off a re-read of the ANA
	 * information page, and just try any other available path for now.
	 */
	if (nvme_is_ana_error(status) && ns->ctrl->ana_log_buf) {
		set_bit(NVME_NS_ANA_PENDING, &ns->flags);
		queue_work(nvme_wq, &ns->ctrl->ana_work);
	}

	spin_lock_irqsave(&ns->head->requeue_lock, flags);
	for (bio = req->bio; bio; bio = bio->bi_next) {
		bio_set_dev(bio, ns->head->disk->part0);
		if (bio->bi_opf & REQ_POLLED) {
			bio->bi_opf &= ~REQ_POLLED;
			bio->bi_cookie = BLK_QC_T_NONE;
		}
	}
	blk_steal_bios(&ns->head->requeue_list, req);
	spin_unlock_irqrestore(&ns->head->requeue_lock, flags);

	blk_mq_end_request(req, 0);
	kblockd_schedule_work(&ns->head->requeue_work);
}

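/*
 * Schedule the requeue work for every namespace head on this controller so
 * parked bios get another shot at path selection, and send a change uevent
 * once the controller is live again.
 */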
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		if (!ns->head->disk)
			continue;
		kblockd_schedule_work(&ns->head->requeue_work);
		if (ctrl->state == NVME_CTRL_LIVE)
			disk_uevent(ns->head->disk, KOBJ_CHANGE);
	}
	up_read(&ctrl->namespaces_rwsem);
}

static const char *nvme_ana_state_names[] = {
	[0]				= "invalid state",
	[NVME_ANA_OPTIMIZED]		= "optimized",
	[NVME_ANA_NONOPTIMIZED]		= "non-optimized",
	[NVME_ANA_INACCESSIBLE]		= "inaccessible",
	[NVME_ANA_PERSISTENT_LOSS]	= "persistent-loss",
	[NVME_ANA_CHANGE]		= "change",
};

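/*
 * Clear @ns as the cached current path for all nodes.  Returns true if the
 * namespace was the current path on at least one node.
 */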
bool nvme_mpath_clear_current_path(struct nvme_ns *ns)
{
	struct nvme_ns_head *head = ns->head;
	bool changed = false;
	int node;

	if (!head)
		goto out;

	for_each_node(node) {
		if (ns == rcu_access_pointer(head->current_path[node])) {
			rcu_assign_pointer(head->current_path[node], NULL);
			changed = true;
		}
	}
out:
	return changed;
}

void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		nvme_mpath_clear_current_path(ns);
		kblockd_schedule_work(&ns->head->requeue_work);
	}
	up_read(&ctrl->namespaces_rwsem);
}

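/*
 * A path changed capacity: mark every sibling whose capacity no longer
 * matches the head disk as not ready and force fresh path selection.
 */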
void nvme_mpath_revalidate_paths(struct nvme_ns *ns)
{
	struct nvme_ns_head *head = ns->head;
	sector_t capacity = get_capacity(head->disk);
	int node;
	int srcu_idx;

	srcu_idx = srcu_read_lock(&head->srcu);
	list_for_each_entry_rcu(ns, &head->list, siblings) {
		if (capacity != get_capacity(ns->disk))
			clear_bit(NVME_NS_READY, &ns->flags);
	}
	srcu_read_unlock(&head->srcu, srcu_idx);

	for_each_node(node)
		rcu_assign_pointer(head->current_path[node], NULL);
	kblockd_schedule_work(&head->requeue_work);
}

static bool nvme_path_is_disabled(struct nvme_ns *ns)
{
	/*
	 * We don't treat NVME_CTRL_DELETING as a disabled path as I/O should
	 * still be able to complete assuming that the controller is connected.
	 * Otherwise it will fail immediately and return to the requeue list.
	 */
	if (ns->ctrl->state != NVME_CTRL_LIVE &&
	    ns->ctrl->state != NVME_CTRL_DELETING)
		return true;
	if (test_bit(NVME_NS_ANA_PENDING, &ns->flags) ||
	    !test_bit(NVME_NS_READY, &ns->flags))
		return true;
	return false;
}

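/*
 * Pick a new path for @node: prefer the closest ANA-optimized path and fall
 * back to the closest non-optimized one.  Under the round-robin policy all
 * distances are equal, so the first match wins.  The winner is cached in
 * head->current_path[node].
 */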
static struct nvme_ns *__nvme_find_path(struct nvme_ns_head *head, int node)
{
	int found_distance = INT_MAX, fallback_distance = INT_MAX, distance;
	struct nvme_ns *found = NULL, *fallback = NULL, *ns;

	list_for_each_entry_rcu(ns, &head->list, siblings) {
		if (nvme_path_is_disabled(ns))
			continue;

		if (READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_NUMA)
			distance = node_distance(node, ns->ctrl->numa_node);
		else
			distance = LOCAL_DISTANCE;

		switch (ns->ana_state) {
		case NVME_ANA_OPTIMIZED:
			if (distance < found_distance) {
				found_distance = distance;
				found = ns;
			}
			break;
		case NVME_ANA_NONOPTIMIZED:
			if (distance < fallback_distance) {
				fallback_distance = distance;
				fallback = ns;
			}
			break;
		default:
			break;
		}
	}

	if (!found)
		found = fallback;
	if (found)
		rcu_assign_pointer(head->current_path[node], found);
	return found;
}

static struct nvme_ns *nvme_next_ns(struct nvme_ns_head *head,
		struct nvme_ns *ns)
{
	ns = list_next_or_null_rcu(&head->list, &ns->siblings, struct nvme_ns,
			siblings);
	if (ns)
		return ns;
	return list_first_or_null_rcu(&head->list, struct nvme_ns, siblings);
}

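/*
 * Round-robin selection: walk the sibling list starting after @old and take
 * the first optimized path, remembering the last usable non-optimized path
 * as a fallback.
 */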
static struct nvme_ns *nvme_round_robin_path(struct nvme_ns_head *head,
		int node, struct nvme_ns *old)
{
	struct nvme_ns *ns, *found = NULL;

	if (list_is_singular(&head->list)) {
		if (nvme_path_is_disabled(old))
			return NULL;
		return old;
	}

	for (ns = nvme_next_ns(head, old);
	     ns && ns != old;
	     ns = nvme_next_ns(head, ns)) {
		if (nvme_path_is_disabled(ns))
			continue;

		if (ns->ana_state == NVME_ANA_OPTIMIZED) {
			found = ns;
			goto out;
		}
		if (ns->ana_state == NVME_ANA_NONOPTIMIZED)
			found = ns;
	}

	/*
	 * The loop above skips the current path for round-robin semantics.
	 * Fall back to the current path if either:
	 * - no other optimized path found and current is optimized,
	 * - no other usable path found and current is usable.
	 */
	if (!nvme_path_is_disabled(old) &&
	    (old->ana_state == NVME_ANA_OPTIMIZED ||
	     (!found && old->ana_state == NVME_ANA_NONOPTIMIZED)))
		return old;

	if (!found)
		return NULL;
out:
	rcu_assign_pointer(head->current_path[node], found);
	return found;
}

static inline bool nvme_path_is_optimized(struct nvme_ns *ns)
{
	return ns->ctrl->state == NVME_CTRL_LIVE &&
		ns->ana_state == NVME_ANA_OPTIMIZED;
}

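/*
 * Fast path lookup: reuse the cached path for this node while it is still
 * optimized, otherwise (re)run full path selection.
 */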
inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head)
{
	int node = numa_node_id();
	struct nvme_ns *ns;

	ns = srcu_dereference(head->current_path[node], &head->srcu);
	if (unlikely(!ns))
		return __nvme_find_path(head, node);

	if (READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_RR)
		return nvme_round_robin_path(head, node, ns);
	if (unlikely(!nvme_path_is_optimized(ns)))
		return __nvme_find_path(head, node);
	return ns;
}

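/*
 * Check if any path could become usable again: a controller that is live or
 * still resetting/connecting and has not hit the failfast timeout.  If not,
 * queued I/O has to be failed instead of requeued.
 */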
static bool nvme_available_path(struct nvme_ns_head *head)
{
	struct nvme_ns *ns;

	list_for_each_entry_rcu(ns, &head->list, siblings) {
		if (test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ns->ctrl->flags))
			continue;
		switch (ns->ctrl->state) {
		case NVME_CTRL_LIVE:
		case NVME_CTRL_RESETTING:
		case NVME_CTRL_CONNECTING:
			return true;
		default:
			break;
		}
	}
	return false;
}

static void nvme_ns_head_submit_bio(struct bio *bio)
{
	struct nvme_ns_head *head = bio->bi_bdev->bd_disk->private_data;
	struct device *dev = disk_to_dev(head->disk);
	struct nvme_ns *ns;
	int srcu_idx;

	/*
	 * The namespace might be going away and the bio might be moved to a
	 * different queue via blk_steal_bios(), so we need to use the bio_split
	 * pool from the original queue to allocate the bvecs from.
	 */
	bio = bio_split_to_limits(bio);
	/* bio_split_to_limits() ends the bio itself on a failed split */
	if (!bio)
		return;

	srcu_idx = srcu_read_lock(&head->srcu);
	ns = nvme_find_path(head);
	if (likely(ns)) {
		bio_set_dev(bio, ns->disk->part0);
		bio->bi_opf |= REQ_NVME_MPATH;
		trace_block_bio_remap(bio, disk_devt(ns->head->disk),
				      bio->bi_iter.bi_sector);
		submit_bio_noacct(bio);
	} else if (nvme_available_path(head)) {
		dev_warn_ratelimited(dev, "no usable path - requeuing I/O\n");

		spin_lock_irq(&head->requeue_lock);
		bio_list_add(&head->requeue_list, bio);
		spin_unlock_irq(&head->requeue_lock);
	} else {
		dev_warn_ratelimited(dev, "no available path - failing I/O\n");

		bio_io_error(bio);
	}

	srcu_read_unlock(&head->srcu, srcu_idx);
}

static int nvme_ns_head_open(struct block_device *bdev, fmode_t mode)
{
	if (!nvme_tryget_ns_head(bdev->bd_disk->private_data))
		return -ENXIO;
	return 0;
}

static void nvme_ns_head_release(struct gendisk *disk, fmode_t mode)
{
	nvme_put_ns_head(disk->private_data);
}

#ifdef CONFIG_BLK_DEV_ZONED
static int nvme_ns_head_report_zones(struct gendisk *disk, sector_t sector,
		unsigned int nr_zones, report_zones_cb cb, void *data)
{
	struct nvme_ns_head *head = disk->private_data;
	struct nvme_ns *ns;
	int srcu_idx, ret = -EWOULDBLOCK;

	srcu_idx = srcu_read_lock(&head->srcu);
	ns = nvme_find_path(head);
	if (ns)
		ret = nvme_ns_report_zones(ns, sector, nr_zones, cb, data);
	srcu_read_unlock(&head->srcu, srcu_idx);
	return ret;
}
#else
#define nvme_ns_head_report_zones	NULL
#endif /* CONFIG_BLK_DEV_ZONED */

const struct block_device_operations nvme_ns_head_ops = {
	.owner		= THIS_MODULE,
	.submit_bio	= nvme_ns_head_submit_bio,
	.open		= nvme_ns_head_open,
	.release	= nvme_ns_head_release,
	.ioctl		= nvme_ns_head_ioctl,
	.compat_ioctl	= blkdev_compat_ptr_ioctl,
	.getgeo		= nvme_getgeo,
	.report_zones	= nvme_ns_head_report_zones,
	.pr_ops		= &nvme_pr_ops,
};

static inline struct nvme_ns_head *cdev_to_ns_head(struct cdev *cdev)
{
	return container_of(cdev, struct nvme_ns_head, cdev);
}

static int nvme_ns_head_chr_open(struct inode *inode, struct file *file)
{
	if (!nvme_tryget_ns_head(cdev_to_ns_head(inode->i_cdev)))
		return -ENXIO;
	return 0;
}

static int nvme_ns_head_chr_release(struct inode *inode, struct file *file)
{
	nvme_put_ns_head(cdev_to_ns_head(inode->i_cdev));
	return 0;
}

static const struct file_operations nvme_ns_head_chr_fops = {
	.owner		= THIS_MODULE,
	.open		= nvme_ns_head_chr_open,
	.release	= nvme_ns_head_chr_release,
	.unlocked_ioctl	= nvme_ns_head_chr_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
	.uring_cmd	= nvme_ns_head_chr_uring_cmd,
	.uring_cmd_iopoll = nvme_ns_head_chr_uring_cmd_iopoll,
};

static int nvme_add_ns_head_cdev(struct nvme_ns_head *head)
{
	int ret;

	head->cdev_device.parent = &head->subsys->dev;
	ret = dev_set_name(&head->cdev_device, "ng%dn%d",
			   head->subsys->instance, head->instance);
	if (ret)
		return ret;
	ret = nvme_cdev_add(&head->cdev, &head->cdev_device,
			    &nvme_ns_head_chr_fops, THIS_MODULE);
	return ret;
}

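/*
 * Resubmit all bios parked on the requeue list; each goes through path
 * selection again via nvme_ns_head_submit_bio().
 */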
static void nvme_requeue_work(struct work_struct *work)
{
	struct nvme_ns_head *head =
		container_of(work, struct nvme_ns_head, requeue_work);
	struct bio *bio, *next;

	spin_lock_irq(&head->requeue_lock);
	next = bio_list_get(&head->requeue_list);
	spin_unlock_irq(&head->requeue_lock);

	while ((bio = next) != NULL) {
		next = bio->bi_next;
		bio->bi_next = NULL;

		submit_bio_noacct(bio);
	}
}

int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
{
	bool vwc = false;

	mutex_init(&head->lock);
	bio_list_init(&head->requeue_list);
	spin_lock_init(&head->requeue_lock);
	INIT_WORK(&head->requeue_work, nvme_requeue_work);

	/*
	 * Add a multipath node if the subsystem supports multiple controllers.
	 * We also do this for private namespaces as the namespace sharing flag
	 * could change after a rescan.
	 */
	if (!(ctrl->subsys->cmic & NVME_CTRL_CMIC_MULTI_CTRL) ||
	    !nvme_is_unique_nsid(ctrl, head) || !multipath)
		return 0;

	head->disk = blk_alloc_disk(ctrl->numa_node);
	if (!head->disk)
		return -ENOMEM;
	head->disk->fops = &nvme_ns_head_ops;
	head->disk->private_data = head;
	sprintf(head->disk->disk_name, "nvme%dn%d",
			ctrl->subsys->instance, head->instance);

	blk_queue_flag_set(QUEUE_FLAG_NONROT, head->disk->queue);
	blk_queue_flag_set(QUEUE_FLAG_NOWAIT, head->disk->queue);
	/*
	 * This assumes all controllers that refer to a namespace either
	 * support poll queues or not.  That is not a strict guarantee, but if
	 * the assumption is wrong the effect is merely suboptimal performance,
	 * not a correctness problem.
	 */
	if (ctrl->tagset->nr_maps > HCTX_TYPE_POLL &&
	    ctrl->tagset->map[HCTX_TYPE_POLL].nr_queues)
		blk_queue_flag_set(QUEUE_FLAG_POLL, head->disk->queue);

	/* set to a default value of 512 until the disk is validated */
	blk_queue_logical_block_size(head->disk->queue, 512);
	blk_set_stacking_limits(&head->disk->queue->limits);
	blk_queue_dma_alignment(head->disk->queue, 3);

	/* we need to propagate up the VWC setting */
	if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
		vwc = true;
	blk_queue_write_cache(head->disk->queue, vwc, vwc);
	return 0;
}

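/*
 * Register the head disk once the first usable path shows up, pre-populate
 * the per-node path cache for optimized paths, and kick the requeue work so
 * parked I/O can make progress.
 */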
static void nvme_mpath_set_live(struct nvme_ns *ns)
{
	struct nvme_ns_head *head = ns->head;
	int rc;

	if (!head->disk)
		return;

	/*
	 * test_and_set_bit() is used because it is protecting against two nvme
	 * paths simultaneously calling device_add_disk() on the same namespace
	 * head.
	 */
	if (!test_and_set_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) {
		rc = device_add_disk(&head->subsys->dev, head->disk,
				     nvme_ns_id_attr_groups);
		if (rc) {
			clear_bit(NVME_NSHEAD_DISK_LIVE, &head->flags);
			return;
		}
		nvme_add_ns_head_cdev(head);
	}

	mutex_lock(&head->lock);
	if (nvme_path_is_optimized(ns)) {
		int node, srcu_idx;

		srcu_idx = srcu_read_lock(&head->srcu);
		for_each_node(node)
			__nvme_find_path(head, node);
		srcu_read_unlock(&head->srcu, srcu_idx);
	}
	mutex_unlock(&head->lock);

	synchronize_srcu(&head->srcu);
	kblockd_schedule_work(&head->requeue_work);
}

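/*
 * Walk all ANA group descriptors in the log buffer, sanity checking offsets
 * and fields against the advertised log size, and call @cb for each
 * descriptor.
 */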
static int nvme_parse_ana_log(struct nvme_ctrl *ctrl, void *data,
		int (*cb)(struct nvme_ctrl *ctrl, struct nvme_ana_group_desc *,
			  void *))
{
	void *base = ctrl->ana_log_buf;
	size_t offset = sizeof(struct nvme_ana_rsp_hdr);
	int error, i;

	lockdep_assert_held(&ctrl->ana_lock);

	for (i = 0; i < le16_to_cpu(ctrl->ana_log_buf->ngrps); i++) {
		struct nvme_ana_group_desc *desc = base + offset;
		u32 nr_nsids;
		size_t nsid_buf_size;

		if (WARN_ON_ONCE(offset > ctrl->ana_log_size - sizeof(*desc)))
			return -EINVAL;

		nr_nsids = le32_to_cpu(desc->nnsids);
		nsid_buf_size = flex_array_size(desc, nsids, nr_nsids);

		if (WARN_ON_ONCE(desc->grpid == 0))
			return -EINVAL;
		if (WARN_ON_ONCE(le32_to_cpu(desc->grpid) > ctrl->anagrpmax))
			return -EINVAL;
		if (WARN_ON_ONCE(desc->state == 0))
			return -EINVAL;
		if (WARN_ON_ONCE(desc->state > NVME_ANA_CHANGE))
			return -EINVAL;

		offset += sizeof(*desc);
		if (WARN_ON_ONCE(offset > ctrl->ana_log_size - nsid_buf_size))
			return -EINVAL;

		error = cb(ctrl, desc, data);
		if (error)
			return error;

		offset += nsid_buf_size;
	}

	return 0;
}

static inline bool nvme_state_is_live(enum nvme_ana_state state)
{
	return state == NVME_ANA_OPTIMIZED || state == NVME_ANA_NONOPTIMIZED;
}

static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc,
		struct nvme_ns *ns)
{
	ns->ana_grpid = le32_to_cpu(desc->grpid);
	ns->ana_state = desc->state;
	clear_bit(NVME_NS_ANA_PENDING, &ns->flags);
	/*
	 * nvme_mpath_set_live() will trigger I/O to the multipath path device
	 * and in turn to this path device.  However we cannot accept this I/O
	 * if the controller is not live.  This may deadlock if called from
	 * nvme_mpath_init_identify() and the ctrl will never complete
	 * initialization, preventing I/O from completing.  For this case we
	 * will reprocess the ANA log page in nvme_mpath_update() once the
	 * controller is ready.
	 */
	if (nvme_state_is_live(ns->ana_state) &&
	    ns->ctrl->state == NVME_CTRL_LIVE)
		nvme_mpath_set_live(ns);
}

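/*
 * Apply one ANA group descriptor to all matching namespaces.  Both the
 * descriptor's NSID list and ctrl->namespaces are sorted by NSID, so a
 * single merge pass over the two lists suffices.
 */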
static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
		struct nvme_ana_group_desc *desc, void *data)
{
	u32 nr_nsids = le32_to_cpu(desc->nnsids), n = 0;
	unsigned *nr_change_groups = data;
	struct nvme_ns *ns;

	dev_dbg(ctrl->device, "ANA group %d: %s.\n",
			le32_to_cpu(desc->grpid),
			nvme_ana_state_names[desc->state]);

	if (desc->state == NVME_ANA_CHANGE)
		(*nr_change_groups)++;

	if (!nr_nsids)
		return 0;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		unsigned nsid;
again:
		nsid = le32_to_cpu(desc->nsids[n]);
		if (ns->head->ns_id < nsid)
			continue;
		if (ns->head->ns_id == nsid)
			nvme_update_ns_ana_state(desc, ns);
		if (++n == nr_nsids)
			break;
		if (ns->head->ns_id > nsid)
			goto again;
	}
	up_read(&ctrl->namespaces_rwsem);
	return 0;
}

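/*
 * (Re)read the ANA log page from the controller and update the ANA state of
 * all namespaces.  The ANATT timer is armed while any group reports change
 * state.
 */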
static int nvme_read_ana_log(struct nvme_ctrl *ctrl)
{
	u32 nr_change_groups = 0;
	int error;

	mutex_lock(&ctrl->ana_lock);
	error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_ANA, 0, NVME_CSI_NVM,
			ctrl->ana_log_buf, ctrl->ana_log_size, 0);
	if (error) {
		dev_warn(ctrl->device, "Failed to get ANA log: %d\n", error);
		goto out_unlock;
	}

	error = nvme_parse_ana_log(ctrl, &nr_change_groups,
			nvme_update_ana_state);
	if (error)
		goto out_unlock;

	/*
	 * In theory we should have an ANATT timer per group as they might
	 * enter the change state at different times.  But that is a lot of
	 * overhead just to protect against a target that keeps entering new
	 * change states while never finishing previous ones.  We'll still
	 * eventually time out once all groups are in change state, so this
	 * isn't a big deal.
	 *
	 * We also double the ANATT value to provide some slack for transports
	 * or AEN processing overhead.
	 */
	if (nr_change_groups)
		mod_timer(&ctrl->anatt_timer, ctrl->anatt * HZ * 2 + jiffies);
	else
		del_timer_sync(&ctrl->anatt_timer);
out_unlock:
	mutex_unlock(&ctrl->ana_lock);
	return error;
}

static void nvme_ana_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl = container_of(work, struct nvme_ctrl, ana_work);

	if (ctrl->state != NVME_CTRL_LIVE)
		return;

	nvme_read_ana_log(ctrl);
}

void nvme_mpath_update(struct nvme_ctrl *ctrl)
{
	u32 nr_change_groups = 0;

	if (!ctrl->ana_log_buf)
		return;

	mutex_lock(&ctrl->ana_lock);
	nvme_parse_ana_log(ctrl, &nr_change_groups, nvme_update_ana_state);
	mutex_unlock(&ctrl->ana_lock);
}

static void nvme_anatt_timeout(struct timer_list *t)
{
	struct nvme_ctrl *ctrl = from_timer(ctrl, t, anatt_timer);

	dev_info(ctrl->device, "ANATT timeout, resetting controller.\n");
	nvme_reset_ctrl(ctrl);
}

void nvme_mpath_stop(struct nvme_ctrl *ctrl)
{
	if (!nvme_ctrl_use_ana(ctrl))
		return;
	del_timer_sync(&ctrl->anatt_timer);
	cancel_work_sync(&ctrl->ana_work);
}

#define SUBSYS_ATTR_RW(_name, _mode, _show, _store)  \
	struct device_attribute subsys_attr_##_name =	\
		__ATTR(_name, _mode, _show, _store)

static ssize_t nvme_subsys_iopolicy_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_subsystem *subsys =
		container_of(dev, struct nvme_subsystem, dev);

	return sysfs_emit(buf, "%s\n",
			  nvme_iopolicy_names[READ_ONCE(subsys->iopolicy)]);
}

static ssize_t nvme_subsys_iopolicy_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct nvme_subsystem *subsys =
		container_of(dev, struct nvme_subsystem, dev);
	int i;

	for (i = 0; i < ARRAY_SIZE(nvme_iopolicy_names); i++) {
		if (sysfs_streq(buf, nvme_iopolicy_names[i])) {
			WRITE_ONCE(subsys->iopolicy, i);
			return count;
		}
	}

	return -EINVAL;
}
SUBSYS_ATTR_RW(iopolicy, S_IRUGO | S_IWUSR,
	       nvme_subsys_iopolicy_show, nvme_subsys_iopolicy_store);

static ssize_t ana_grpid_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	return sysfs_emit(buf, "%d\n", nvme_get_ns_from_dev(dev)->ana_grpid);
}
DEVICE_ATTR_RO(ana_grpid);

static ssize_t ana_state_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct nvme_ns *ns = nvme_get_ns_from_dev(dev);

	return sysfs_emit(buf, "%s\n", nvme_ana_state_names[ns->ana_state]);
}
DEVICE_ATTR_RO(ana_state);

static int nvme_lookup_ana_group_desc(struct nvme_ctrl *ctrl,
		struct nvme_ana_group_desc *desc, void *data)
{
	struct nvme_ana_group_desc *dst = data;

	if (desc->grpid != dst->grpid)
		return 0;

	*dst = *desc;
	return -ENXIO; /* just break out of the loop */
}

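/*
 * Set up the initial ANA state for a freshly scanned namespace: look up its
 * group descriptor in the most recently read ANA log, or trigger a re-read
 * if the group is not in it yet.  Without ANA the path is simply marked
 * optimized.
 */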
void nvme_mpath_add_disk(struct nvme_ns *ns, __le32 anagrpid)
{
	if (nvme_ctrl_use_ana(ns->ctrl)) {
		struct nvme_ana_group_desc desc = {
			.grpid = anagrpid,
			.state = 0,
		};

		mutex_lock(&ns->ctrl->ana_lock);
		ns->ana_grpid = le32_to_cpu(anagrpid);
		nvme_parse_ana_log(ns->ctrl, &desc, nvme_lookup_ana_group_desc);
		mutex_unlock(&ns->ctrl->ana_lock);
		if (desc.state) {
			/* found the group desc: update */
			nvme_update_ns_ana_state(&desc, ns);
		} else {
			/* group desc not found: trigger a re-read */
			set_bit(NVME_NS_ANA_PENDING, &ns->flags);
			queue_work(nvme_wq, &ns->ctrl->ana_work);
		}
	} else {
		ns->ana_state = NVME_ANA_OPTIMIZED;
		nvme_mpath_set_live(ns);
	}

	if (blk_queue_stable_writes(ns->queue) && ns->head->disk)
		blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES,
				   ns->head->disk->queue);
#ifdef CONFIG_BLK_DEV_ZONED
	if (blk_queue_is_zoned(ns->queue) && ns->head->disk)
		ns->head->disk->nr_zones = ns->disk->nr_zones;
#endif
}

void nvme_mpath_shutdown_disk(struct nvme_ns_head *head)
{
	if (!head->disk)
		return;
	kblockd_schedule_work(&head->requeue_work);
	if (test_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) {
		nvme_cdev_del(&head->cdev, &head->cdev_device);
		del_gendisk(head->disk);
	}
}

void nvme_mpath_remove_disk(struct nvme_ns_head *head)
{
	if (!head->disk)
		return;
	blk_mark_disk_dead(head->disk);
	/* make sure all pending bios are cleaned up */
	kblockd_schedule_work(&head->requeue_work);
	flush_work(&head->requeue_work);
	put_disk(head->disk);
}

void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl)
{
	mutex_init(&ctrl->ana_lock);
	timer_setup(&ctrl->anatt_timer, nvme_anatt_timeout, 0);
	INIT_WORK(&ctrl->ana_work, nvme_ana_work);
}

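/*
 * Parse the ANA fields of the Identify Controller data, size (and if needed
 * reallocate) the ANA log buffer, and read the initial ANA log page.  An
 * ANA log that would exceed MDTS disables ANA support but is not fatal.
 */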
int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
{
	size_t max_transfer_size = ctrl->max_hw_sectors << SECTOR_SHIFT;
	size_t ana_log_size;
	int error = 0;

	/* check if multipath is enabled and we have the capability */
	if (!multipath || !ctrl->subsys ||
	    !(ctrl->subsys->cmic & NVME_CTRL_CMIC_ANA))
		return 0;

	if (!ctrl->max_namespaces ||
	    ctrl->max_namespaces > le32_to_cpu(id->nn)) {
		dev_err(ctrl->device,
			"Invalid MNAN value %u\n", ctrl->max_namespaces);
		return -EINVAL;
	}

	ctrl->anacap = id->anacap;
	ctrl->anatt = id->anatt;
	ctrl->nanagrpid = le32_to_cpu(id->nanagrpid);
	ctrl->anagrpmax = le32_to_cpu(id->anagrpmax);

	ana_log_size = sizeof(struct nvme_ana_rsp_hdr) +
		ctrl->nanagrpid * sizeof(struct nvme_ana_group_desc) +
		ctrl->max_namespaces * sizeof(__le32);
	if (ana_log_size > max_transfer_size) {
		dev_err(ctrl->device,
			"ANA log page size (%zd) larger than MDTS (%zd).\n",
			ana_log_size, max_transfer_size);
		dev_err(ctrl->device, "disabling ANA support.\n");
		goto out_uninit;
	}
	if (ana_log_size > ctrl->ana_log_size) {
		nvme_mpath_stop(ctrl);
		nvme_mpath_uninit(ctrl);
		ctrl->ana_log_buf = kvmalloc(ana_log_size, GFP_KERNEL);
		if (!ctrl->ana_log_buf)
			return -ENOMEM;
	}
	ctrl->ana_log_size = ana_log_size;
	error = nvme_read_ana_log(ctrl);
	if (error)
		goto out_uninit;
	return 0;

out_uninit:
	nvme_mpath_uninit(ctrl);
	return error;
}

void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
{
	kvfree(ctrl->ana_log_buf);
	ctrl->ana_log_buf = NULL;
	ctrl->ana_log_size = 0;
}