// SPDX-License-Identifier: GPL-2.0-or-later
/*  Xenbus code for blkif backend
    Copyright (C) 2005 Rusty Russell <rusty@rustcorp.com.au>
    Copyright (C) 2005 XenSource Ltd


*/

#define pr_fmt(fmt) "xen-blkback: " fmt

#include <linux/module.h>
#include <linux/kthread.h>
#include <xen/events.h>
#include <xen/grant_table.h>
#include "common.h"

/* On the XenBus the max length of 'ring-ref%u'. */
#define RINGREF_NAME_LEN (20)

struct backend_info {
	struct xenbus_device	*dev;
	struct xen_blkif	*blkif;
	struct xenbus_watch	backend_watch;
	unsigned		major;
	unsigned		minor;
	char			*mode;
};

static struct kmem_cache *xen_blkif_cachep;
static void connect(struct backend_info *);
static int connect_ring(struct backend_info *);
static void backend_changed(struct xenbus_watch *, const char *,
			    const char *);
static void xen_blkif_free(struct xen_blkif *blkif);
static void xen_vbd_free(struct xen_vbd *vbd);

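/* Return the xenbus device associated with this backend. */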
struct xenbus_device *xen_blkbk_xenbus(struct backend_info *be)
{
	return be->dev;
}

/*
 * The last request may drop the final reference from softirq context,
 * but xen_blkif_free() can sleep, so defer the actual freeing to a
 * workqueue.
 */
static void xen_blkif_deferred_free(struct work_struct *work)
{
	struct xen_blkif *blkif;

	blkif = container_of(work, struct xen_blkif, free_work);
	xen_blkif_free(blkif);
}

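/*
 * Build the name used for this backend's kthreads:
 * "<frontend-domid>.<device name>", e.g. "1.xvda" for a "dev" node of
 * "/dev/xvda" exported to domain 1.
 */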
static int blkback_name(struct xen_blkif *blkif, char *buf)
{
	char *devpath, *devname;
	struct xenbus_device *dev = blkif->be->dev;

	devpath = xenbus_read(XBT_NIL, dev->nodename, "dev", NULL);
	if (IS_ERR(devpath))
		return PTR_ERR(devpath);

	devname = strstr(devpath, "/dev/");
	if (devname != NULL)
		devname += strlen("/dev/");
	else
		devname  = devpath;

	snprintf(buf, TASK_COMM_LEN, "%d.%s", blkif->domid, devname);
	kfree(devpath);

	return 0;
}

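/*
 * If the rings are mapped and the vbd is opened, attempt the switch to
 * Connected, flush the block device's page cache, and start one
 * "xenblkd" kthread per ring.
 */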
static void xen_update_blkif_status(struct xen_blkif *blkif)
{
	int err;
	char name[TASK_COMM_LEN];
	struct xen_blkif_ring *ring;
	int i;

	/* Not ready to connect? */
	if (!blkif->rings || !blkif->rings[0].irq || !blkif->vbd.bdev)
		return;

	/* Already connected? */
	if (blkif->be->dev->state == XenbusStateConnected)
		return;

	/* Attempt to connect: exit if we fail to. */
	connect(blkif->be);
	if (blkif->be->dev->state != XenbusStateConnected)
		return;

	err = blkback_name(blkif, name);
	if (err) {
		xenbus_dev_error(blkif->be->dev, err, "get blkback dev name");
		return;
	}

	err = filemap_write_and_wait(blkif->vbd.bdev->bd_inode->i_mapping);
	if (err) {
		xenbus_dev_error(blkif->be->dev, err, "block flush");
		return;
	}
	invalidate_inode_pages2(blkif->vbd.bdev->bd_inode->i_mapping);

	for (i = 0; i < blkif->nr_rings; i++) {
		ring = &blkif->rings[i];
		ring->xenblkd = kthread_run(xen_blkif_schedule, ring, "%s-%d",
					    name, i);
		if (IS_ERR(ring->xenblkd)) {
			err = PTR_ERR(ring->xenblkd);
			ring->xenblkd = NULL;
			xenbus_dev_fatal(blkif->be->dev, err,
					 "start %s-%d xenblkd", name, i);
			goto out;
		}
	}
	return;

out:
	/*
	 * Stop the kthreads started so far and clear the pointers so that
	 * a later xen_blkif_disconnect() does not stop them a second time.
	 */
	while (--i >= 0) {
		ring = &blkif->rings[i];
		kthread_stop(ring->xenblkd);
		ring->xenblkd = NULL;
	}
	return;
}

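/*
 * Allocate and initialise the nr_rings per-queue ring structures,
 * including their locks, wait queues and free-page caches.
 */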
static int xen_blkif_alloc_rings(struct xen_blkif *blkif)
{
	unsigned int r;

	blkif->rings = kcalloc(blkif->nr_rings, sizeof(struct xen_blkif_ring),
			       GFP_KERNEL);
	if (!blkif->rings)
		return -ENOMEM;

	for (r = 0; r < blkif->nr_rings; r++) {
		struct xen_blkif_ring *ring = &blkif->rings[r];

		spin_lock_init(&ring->blk_ring_lock);
		init_waitqueue_head(&ring->wq);
		INIT_LIST_HEAD(&ring->pending_free);
		INIT_LIST_HEAD(&ring->persistent_purge_list);
		INIT_WORK(&ring->persistent_purge_work, xen_blkbk_unmap_purged_grants);
		gnttab_page_cache_init(&ring->free_pages);

		spin_lock_init(&ring->pending_free_lock);
		init_waitqueue_head(&ring->pending_free_wq);
		init_waitqueue_head(&ring->shutdown_wq);
		ring->blkif = blkif;
		ring->st_print = jiffies;
		ring->active = true;
	}

	return 0;
}

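/* Allocate a new blkif for the given frontend domain. */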
static struct xen_blkif *xen_blkif_alloc(domid_t domid)
{
	struct xen_blkif *blkif;

	BUILD_BUG_ON(MAX_INDIRECT_PAGES > BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST);

	blkif = kmem_cache_zalloc(xen_blkif_cachep, GFP_KERNEL);
	if (!blkif)
		return ERR_PTR(-ENOMEM);

	blkif->domid = domid;
	atomic_set(&blkif->refcnt, 1);
	init_completion(&blkif->drain_complete);

	/*
	 * Because freeing back to the cache may be deferred, it is not
	 * safe to unload the module (and hence destroy the cache) until
	 * this has completed. To prevent premature unloading, take an
	 * extra module reference here and release only when the object
	 * has been freed back to the cache.
	 */
	__module_get(THIS_MODULE);
	INIT_WORK(&blkif->free_work, xen_blkif_deferred_free);

	return blkif;
}

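/*
 * Map the frontend's shared ring pages into our address space, attach
 * the BACK_RING for the negotiated protocol and bind the event channel.
 */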
static int xen_blkif_map(struct xen_blkif_ring *ring, grant_ref_t *gref,
			 unsigned int nr_grefs, unsigned int evtchn)
{
	int err;
	struct xen_blkif *blkif = ring->blkif;
	const struct blkif_common_sring *sring_common;
	RING_IDX rsp_prod, req_prod;
	unsigned int size;

	/* Already connected through? */
	if (ring->irq)
		return 0;

	err = xenbus_map_ring_valloc(blkif->be->dev, gref, nr_grefs,
				     &ring->blk_ring);
	if (err < 0)
		return err;

	sring_common = (struct blkif_common_sring *)ring->blk_ring;
	rsp_prod = READ_ONCE(sring_common->rsp_prod);
	req_prod = READ_ONCE(sring_common->req_prod);

	switch (blkif->blk_protocol) {
	case BLKIF_PROTOCOL_NATIVE:
	{
		struct blkif_sring *sring_native =
			(struct blkif_sring *)ring->blk_ring;

		BACK_RING_ATTACH(&ring->blk_rings.native, sring_native,
				 rsp_prod, XEN_PAGE_SIZE * nr_grefs);
		size = __RING_SIZE(sring_native, XEN_PAGE_SIZE * nr_grefs);
		break;
	}
	case BLKIF_PROTOCOL_X86_32:
	{
		struct blkif_x86_32_sring *sring_x86_32 =
			(struct blkif_x86_32_sring *)ring->blk_ring;

		BACK_RING_ATTACH(&ring->blk_rings.x86_32, sring_x86_32,
				 rsp_prod, XEN_PAGE_SIZE * nr_grefs);
		size = __RING_SIZE(sring_x86_32, XEN_PAGE_SIZE * nr_grefs);
		break;
	}
	case BLKIF_PROTOCOL_X86_64:
	{
		struct blkif_x86_64_sring *sring_x86_64 =
			(struct blkif_x86_64_sring *)ring->blk_ring;

		BACK_RING_ATTACH(&ring->blk_rings.x86_64, sring_x86_64,
				 rsp_prod, XEN_PAGE_SIZE * nr_grefs);
		size = __RING_SIZE(sring_x86_64, XEN_PAGE_SIZE * nr_grefs);
		break;
	}
	default:
		BUG();
	}

	err = -EIO;
	if (req_prod - rsp_prod > size)
		goto fail;

	err = bind_interdomain_evtchn_to_irqhandler_lateeoi(blkif->be->dev,
			evtchn, xen_blkif_be_int, 0, "blkif-backend", ring);
	if (err < 0)
		goto fail;
	ring->irq = err;

	return 0;

fail:
	xenbus_unmap_ring_vfree(blkif->be->dev, ring->blk_ring);
	ring->blk_rings.common.sring = NULL;
	return err;
}

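/*
 * Stop the kthreads, unbind the IRQs, unmap the rings and free the
 * per-ring resources.  Returns -EBUSY without tearing down a ring that
 * still has I/O in flight, so the caller may retry.
 */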
static int xen_blkif_disconnect(struct xen_blkif *blkif)
{
	struct pending_req *req, *n;
	unsigned int j, r;
	bool busy = false;

	for (r = 0; r < blkif->nr_rings; r++) {
		struct xen_blkif_ring *ring = &blkif->rings[r];
		unsigned int i = 0;

		if (!ring->active)
			continue;

		if (ring->xenblkd) {
			kthread_stop(ring->xenblkd);
			ring->xenblkd = NULL;
			wake_up(&ring->shutdown_wq);
		}

		/*
		 * The above kthread_stop() guarantees that at this point we
		 * don't have any discard_io or other_io requests. So, checking
		 * for inflight IO is enough.
		 */
		if (atomic_read(&ring->inflight) > 0) {
			busy = true;
			continue;
		}

		if (ring->irq) {
			unbind_from_irqhandler(ring->irq, ring);
			ring->irq = 0;
		}

		if (ring->blk_rings.common.sring) {
			xenbus_unmap_ring_vfree(blkif->be->dev, ring->blk_ring);
			ring->blk_rings.common.sring = NULL;
		}

		/* Remove all persistent grants and the cache of ballooned pages. */
		xen_blkbk_free_caches(ring);

		/* Check that there is no request in use */
		list_for_each_entry_safe(req, n, &ring->pending_free, free_list) {
			list_del(&req->free_list);

			for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++)
				kfree(req->segments[j]);

			for (j = 0; j < MAX_INDIRECT_PAGES; j++)
				kfree(req->indirect_pages[j]);

			kfree(req);
			i++;
		}

		BUG_ON(atomic_read(&ring->persistent_gnt_in_use) != 0);
		BUG_ON(!list_empty(&ring->persistent_purge_list));
		BUG_ON(!RB_EMPTY_ROOT(&ring->persistent_gnts));
		BUG_ON(ring->free_pages.num_pages != 0);
		BUG_ON(ring->persistent_gnt_c != 0);
		WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));
		ring->active = false;
	}
	if (busy)
		return -EBUSY;

	blkif->nr_ring_pages = 0;
	/*
	 * blkif->rings was allocated in connect_ring(), so free it here.
	 */
	kfree(blkif->rings);
	blkif->rings = NULL;
	blkif->nr_rings = 0;

	return 0;
}

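/*
 * Final teardown, run from the deferred-free work item once the last
 * reference has been dropped.
 */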
static void xen_blkif_free(struct xen_blkif *blkif)
{
	WARN_ON(xen_blkif_disconnect(blkif));
	xen_vbd_free(&blkif->vbd);
	kfree(blkif->be->mode);
	kfree(blkif->be);

	/* Make sure everything is drained before shutting down */
	kmem_cache_free(xen_blkif_cachep, blkif);
	module_put(THIS_MODULE);
}

int __init xen_blkif_interface_init(void)
{
	xen_blkif_cachep = kmem_cache_create("blkif_cache",
					     sizeof(struct xen_blkif),
					     0, 0, NULL);
	if (!xen_blkif_cachep)
		return -ENOMEM;

	return 0;
}

void xen_blkif_interface_fini(void)
{
	kmem_cache_destroy(xen_blkif_cachep);
	xen_blkif_cachep = NULL;
}

/*
 *  sysfs interface for VBD I/O requests
 */

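/*
 * Generate a read-only device attribute that reports a request
 * statistic summed over all of the backend's rings.
 */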
#define VBD_SHOW_ALLRING(name, format)					\
	static ssize_t show_##name(struct device *_dev,			\
				   struct device_attribute *attr,	\
				   char *buf)				\
	{								\
		struct xenbus_device *dev = to_xenbus_device(_dev);	\
		struct backend_info *be = dev_get_drvdata(&dev->dev);	\
		struct xen_blkif *blkif = be->blkif;			\
		unsigned int i;						\
		unsigned long long result = 0;				\
									\
		if (!blkif->rings)					\
			goto out;					\
									\
		for (i = 0; i < blkif->nr_rings; i++) {			\
			struct xen_blkif_ring *ring = &blkif->rings[i];	\
									\
			result += ring->st_##name;			\
		}							\
									\
out:									\
		return sprintf(buf, format, result);			\
	}								\
	static DEVICE_ATTR(name, 0444, show_##name, NULL)

VBD_SHOW_ALLRING(oo_req,  "%llu\n");
VBD_SHOW_ALLRING(rd_req,  "%llu\n");
VBD_SHOW_ALLRING(wr_req,  "%llu\n");
VBD_SHOW_ALLRING(f_req,  "%llu\n");
VBD_SHOW_ALLRING(ds_req,  "%llu\n");
VBD_SHOW_ALLRING(rd_sect, "%llu\n");
VBD_SHOW_ALLRING(wr_sect, "%llu\n");

static struct attribute *xen_vbdstat_attrs[] = {
	&dev_attr_oo_req.attr,
	&dev_attr_rd_req.attr,
	&dev_attr_wr_req.attr,
	&dev_attr_f_req.attr,
	&dev_attr_ds_req.attr,
	&dev_attr_rd_sect.attr,
	&dev_attr_wr_sect.attr,
	NULL
};

static const struct attribute_group xen_vbdstat_group = {
	.name = "statistics",
	.attrs = xen_vbdstat_attrs,
};

#define VBD_SHOW(name, format, args...)					\
	static ssize_t show_##name(struct device *_dev,			\
				   struct device_attribute *attr,	\
				   char *buf)				\
	{								\
		struct xenbus_device *dev = to_xenbus_device(_dev);	\
		struct backend_info *be = dev_get_drvdata(&dev->dev);	\
									\
		return sprintf(buf, format, ##args);			\
	}								\
	static DEVICE_ATTR(name, 0444, show_##name, NULL)

VBD_SHOW(physical_device, "%x:%x\n", be->major, be->minor);
VBD_SHOW(mode, "%s\n", be->mode);

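/*
 * Create the sysfs nodes for this vbd: the physical_device and mode
 * attributes plus the "statistics" group.
 */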
static int xenvbd_sysfs_addif(struct xenbus_device *dev)
{
	int error;

	error = device_create_file(&dev->dev, &dev_attr_physical_device);
	if (error)
		goto fail1;

	error = device_create_file(&dev->dev, &dev_attr_mode);
	if (error)
		goto fail2;

	error = sysfs_create_group(&dev->dev.kobj, &xen_vbdstat_group);
	if (error)
		goto fail3;

	return 0;

fail3:	sysfs_remove_group(&dev->dev.kobj, &xen_vbdstat_group);
fail2:	device_remove_file(&dev->dev, &dev_attr_mode);
fail1:	device_remove_file(&dev->dev, &dev_attr_physical_device);
	return error;
}

static void xenvbd_sysfs_delif(struct xenbus_device *dev)
{
	sysfs_remove_group(&dev->dev.kobj, &xen_vbdstat_group);
	device_remove_file(&dev->dev, &dev_attr_mode);
	device_remove_file(&dev->dev, &dev_attr_physical_device);
}

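/* Release the block device reference taken in xen_vbd_create(). */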
static void xen_vbd_free(struct xen_vbd *vbd)
{
	if (vbd->bdev)
		blkdev_put(vbd->bdev, vbd->readonly ? FMODE_READ : FMODE_WRITE);
	vbd->bdev = NULL;
}

/* Enable the persistent grants feature. */
static bool feature_persistent = true;
module_param(feature_persistent, bool, 0644);
MODULE_PARM_DESC(feature_persistent,
		"Enables the persistent grants feature");

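/*
 * Open the physical block device and record its properties (size,
 * flush/secure-erase support, CD-ROM and removable flags) in the vbd.
 */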
static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
			  unsigned major, unsigned minor, int readonly,
			  int cdrom)
{
	struct xen_vbd *vbd;
	struct block_device *bdev;
	struct request_queue *q;

	vbd = &blkif->vbd;
	vbd->handle   = handle;
	vbd->readonly = readonly;
	vbd->type     = 0;

	vbd->pdevice  = MKDEV(major, minor);

	bdev = blkdev_get_by_dev(vbd->pdevice, vbd->readonly ?
				 FMODE_READ : FMODE_WRITE, NULL);

	if (IS_ERR(bdev)) {
		pr_warn("xen_vbd_create: device %08x could not be opened\n",
			vbd->pdevice);
		return -ENOENT;
	}

	vbd->bdev = bdev;
	if (vbd->bdev->bd_disk == NULL) {
		pr_warn("xen_vbd_create: device %08x doesn't exist\n",
			vbd->pdevice);
		xen_vbd_free(vbd);
		return -ENOENT;
	}
	vbd->size = vbd_sz(vbd);

	if (vbd->bdev->bd_disk->flags & GENHD_FL_CD || cdrom)
		vbd->type |= VDISK_CDROM;
	if (vbd->bdev->bd_disk->flags & GENHD_FL_REMOVABLE)
		vbd->type |= VDISK_REMOVABLE;

	q = bdev_get_queue(bdev);
	if (q && test_bit(QUEUE_FLAG_WC, &q->queue_flags))
		vbd->flush_support = true;

	if (q && blk_queue_secure_erase(q))
		vbd->discard_secure = true;

	vbd->feature_gnt_persistent = feature_persistent;

	pr_debug("Successful creation of handle=%04x (dom=%u)\n",
		handle, blkif->domid);
	return 0;
}

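/* Undo xen_blkbk_probe() when the xenbus device goes away. */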
static int xen_blkbk_remove(struct xenbus_device *dev)
{
	struct backend_info *be = dev_get_drvdata(&dev->dev);

	pr_debug("%s %p %d\n", __func__, dev, dev->otherend_id);

	if (be->major || be->minor)
		xenvbd_sysfs_delif(dev);

	if (be->backend_watch.node) {
		unregister_xenbus_watch(&be->backend_watch);
		kfree(be->backend_watch.node);
		be->backend_watch.node = NULL;
	}

	dev_set_drvdata(&dev->dev, NULL);

	if (be->blkif) {
		xen_blkif_disconnect(be->blkif);

		/* Put the reference we set in xen_blkif_alloc(). */
		xen_blkif_put(be->blkif);
	}

	return 0;
}

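/*
 * Advertise "feature-flush-cache" to the frontend; a failure to write
 * the node is reported but not fatal.
 */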
int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
			      struct backend_info *be, int state)
{
	struct xenbus_device *dev = be->dev;
	int err;

	err = xenbus_printf(xbt, dev->nodename, "feature-flush-cache",
			    "%d", state);
	if (err)
		dev_warn(&dev->dev, "writing feature-flush-cache (%d)", err);

	return err;
}

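/*
 * Advertise discard support to the frontend if the underlying queue
 * supports it, along with its granularity, alignment and whether
 * secure erase is available.
 */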
static void xen_blkbk_discard(struct xenbus_transaction xbt, struct backend_info *be)
{
	struct xenbus_device *dev = be->dev;
	struct xen_blkif *blkif = be->blkif;
	int err;
	int state = 0;
	struct block_device *bdev = be->blkif->vbd.bdev;
	struct request_queue *q = bdev_get_queue(bdev);

	if (!xenbus_read_unsigned(dev->nodename, "discard-enable", 1))
		return;

	if (blk_queue_discard(q)) {
		err = xenbus_printf(xbt, dev->nodename,
			"discard-granularity", "%u",
			q->limits.discard_granularity);
		if (err) {
			dev_warn(&dev->dev, "writing discard-granularity (%d)", err);
			return;
		}
		err = xenbus_printf(xbt, dev->nodename,
			"discard-alignment", "%u",
			q->limits.discard_alignment);
		if (err) {
			dev_warn(&dev->dev, "writing discard-alignment (%d)", err);
			return;
		}
		state = 1;
		/* Optional. */
		err = xenbus_printf(xbt, dev->nodename,
				    "discard-secure", "%d",
				    blkif->vbd.discard_secure);
		if (err) {
			dev_warn(&dev->dev, "writing discard-secure (%d)", err);
			return;
		}
	}
	err = xenbus_printf(xbt, dev->nodename, "feature-discard",
			    "%d", state);
	if (err)
		dev_warn(&dev->dev, "writing feature-discard (%d)", err);
}

int xen_blkbk_barrier(struct xenbus_transaction xbt,
		      struct backend_info *be, int state)
{
	struct xenbus_device *dev = be->dev;
	int err;

	err = xenbus_printf(xbt, dev->nodename, "feature-barrier",
			    "%d", state);
	if (err)
		dev_warn(&dev->dev, "writing feature-barrier (%d)", err);

	return err;
}

/*
 * Entry point to this code when a new device is created.  Allocate the basic
 * structures, and watch the store waiting for the hotplug scripts to tell us
 * the device's physical major and minor numbers.  Switch to InitWait.
 */
static int xen_blkbk_probe(struct xenbus_device *dev,
			   const struct xenbus_device_id *id)
{
	int err;
	struct backend_info *be = kzalloc(sizeof(struct backend_info),
					  GFP_KERNEL);

	/* match the pr_debug in xen_blkbk_remove */
	pr_debug("%s %p %d\n", __func__, dev, dev->otherend_id);

	if (!be) {
		xenbus_dev_fatal(dev, -ENOMEM,
				 "allocating backend structure");
		return -ENOMEM;
	}
	be->dev = dev;
	dev_set_drvdata(&dev->dev, be);

	be->blkif = xen_blkif_alloc(dev->otherend_id);
	if (IS_ERR(be->blkif)) {
		err = PTR_ERR(be->blkif);
		be->blkif = NULL;
		xenbus_dev_fatal(dev, err, "creating block interface");
		goto fail;
	}

	err = xenbus_printf(XBT_NIL, dev->nodename,
			    "feature-max-indirect-segments", "%u",
			    MAX_INDIRECT_SEGMENTS);
	if (err)
		dev_warn(&dev->dev,
			 "writing %s/feature-max-indirect-segments (%d)",
			 dev->nodename, err);

	/* Multi-queue: advertise how many queues we support. */
	err = xenbus_printf(XBT_NIL, dev->nodename,
			    "multi-queue-max-queues", "%u", xenblk_max_queues);
	if (err)
		pr_warn("Error writing multi-queue-max-queues\n");

	/* setup back pointer */
	be->blkif->be = be;

	err = xenbus_watch_pathfmt(dev, &be->backend_watch, NULL,
				   backend_changed,
				   "%s/%s", dev->nodename, "physical-device");
	if (err)
		goto fail;

	err = xenbus_printf(XBT_NIL, dev->nodename, "max-ring-page-order", "%u",
			    xen_blkif_max_ring_order);
	if (err)
		pr_warn("%s: failed to write 'max-ring-page-order'\n", __func__);

	err = xenbus_switch_state(dev, XenbusStateInitWait);
	if (err)
		goto fail;

	return 0;

fail:
	pr_warn("%s failed\n", __func__);
	xen_blkbk_remove(dev);
	return err;
}

/*
 * Callback received when the hotplug scripts have placed the physical-device
 * node.  Read it and the mode node, and create a vbd.  If the frontend is
 * ready, connect.
 */
static void backend_changed(struct xenbus_watch *watch,
			    const char *path, const char *token)
{
	int err;
	unsigned major;
	unsigned minor;
	struct backend_info *be
		= container_of(watch, struct backend_info, backend_watch);
	struct xenbus_device *dev = be->dev;
	int cdrom = 0;
	unsigned long handle;
	char *device_type;

	pr_debug("%s %p %d\n", __func__, dev, dev->otherend_id);

	err = xenbus_scanf(XBT_NIL, dev->nodename, "physical-device", "%x:%x",
			   &major, &minor);
	if (XENBUS_EXIST_ERR(err)) {
		/*
		 * Since this watch will fire once immediately after it is
		 * registered, we expect this.  Ignore it, and wait for the
		 * hotplug scripts.
		 */
		return;
	}
	if (err != 2) {
		xenbus_dev_fatal(dev, err, "reading physical-device");
		return;
	}

	if (be->major | be->minor) {
		if (be->major != major || be->minor != minor)
			pr_warn("changing physical device (from %x:%x to %x:%x) not supported.\n",
				be->major, be->minor, major, minor);
		return;
	}

	be->mode = xenbus_read(XBT_NIL, dev->nodename, "mode", NULL);
	if (IS_ERR(be->mode)) {
		err = PTR_ERR(be->mode);
		be->mode = NULL;
		xenbus_dev_fatal(dev, err, "reading mode");
		return;
	}

	device_type = xenbus_read(XBT_NIL, dev->otherend, "device-type", NULL);
	if (!IS_ERR(device_type)) {
		cdrom = strcmp(device_type, "cdrom") == 0;
		kfree(device_type);
	}

	/* Front end dir is a number, which is used as the handle. */
	err = kstrtoul(strrchr(dev->otherend, '/') + 1, 0, &handle);
	if (err) {
		kfree(be->mode);
		be->mode = NULL;
		return;
	}

	be->major = major;
	be->minor = minor;

	err = xen_vbd_create(be->blkif, handle, major, minor,
			     !strchr(be->mode, 'w'), cdrom);

	if (err)
		xenbus_dev_fatal(dev, err, "creating vbd structure");
	else {
		err = xenvbd_sysfs_addif(dev);
		if (err) {
			xen_vbd_free(&be->blkif->vbd);
			xenbus_dev_fatal(dev, err, "creating sysfs entries");
		}
	}

	if (err) {
		kfree(be->mode);
		be->mode = NULL;
		be->major = 0;
		be->minor = 0;
	} else {
		/* We're potentially connected now */
		xen_update_blkif_status(be->blkif);
	}
}

/*
 * Callback received when the frontend's state changes.
 */
static void frontend_changed(struct xenbus_device *dev,
			     enum xenbus_state frontend_state)
{
	struct backend_info *be = dev_get_drvdata(&dev->dev);
	int err;

	pr_debug("%s %p %s\n", __func__, dev, xenbus_strstate(frontend_state));

	switch (frontend_state) {
	case XenbusStateInitialising:
		if (dev->state == XenbusStateClosed) {
			pr_info("%s: prepare for reconnect\n", dev->nodename);
			xenbus_switch_state(dev, XenbusStateInitWait);
		}
		break;

	case XenbusStateInitialised:
	case XenbusStateConnected:
		/*
		 * Ensure we connect even when two watches fire in
		 * close succession and we miss the intermediate value
		 * of frontend_state.
		 */
		if (dev->state == XenbusStateConnected)
			break;

		/*
		 * Enforce precondition before potential leak point.
		 * xen_blkif_disconnect() is idempotent.
		 */
		err = xen_blkif_disconnect(be->blkif);
		if (err) {
			xenbus_dev_fatal(dev, err, "pending I/O");
			break;
		}

		err = connect_ring(be);
		if (err) {
			/*
			 * Clean up so that memory resources can be used by
			 * other devices. connect_ring() already reported the
			 * error.
			 */
			xen_blkif_disconnect(be->blkif);
			break;
		}
		xen_update_blkif_status(be->blkif);
		break;

	case XenbusStateClosing:
		xenbus_switch_state(dev, XenbusStateClosing);
		break;

	case XenbusStateClosed:
		xen_blkif_disconnect(be->blkif);
		xenbus_switch_state(dev, XenbusStateClosed);
		if (xenbus_dev_is_online(dev))
			break;
		fallthrough;
		/* if not online */
	case XenbusStateUnknown:
		/* implies xen_blkif_disconnect() via xen_blkbk_remove() */
		device_unregister(&dev->dev);
		break;

	default:
		xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
				 frontend_state);
		break;
	}
}

/* Once memory pressure is detected, squeeze free page pools for a while. */
static unsigned int buffer_squeeze_duration_ms = 10;
module_param_named(buffer_squeeze_duration_ms,
		buffer_squeeze_duration_ms, int, 0644);
MODULE_PARM_DESC(buffer_squeeze_duration_ms,
"Duration in ms to squeeze pages buffer when memory pressure is detected");

/*
 * Callback invoked when memory pressure is detected.
 */
static void reclaim_memory(struct xenbus_device *dev)
{
	struct backend_info *be = dev_get_drvdata(&dev->dev);

	if (!be)
		return;
	be->blkif->buffer_squeeze_end = jiffies +
		msecs_to_jiffies(buffer_squeeze_duration_ms);
}

/* ** Connection ** */

/*
 * Write the physical details regarding the block device to the store, and
 * switch to Connected state.
 */
static void connect(struct backend_info *be)
{
	struct xenbus_transaction xbt;
	int err;
	struct xenbus_device *dev = be->dev;

	pr_debug("%s %s\n", __func__, dev->otherend);

	/* Supply the information about the device the frontend needs */
again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		xenbus_dev_fatal(dev, err, "starting transaction");
		return;
	}

	/* If we can't advertise it is OK. */
	xen_blkbk_flush_diskcache(xbt, be, be->blkif->vbd.flush_support);

	xen_blkbk_discard(xbt, be);

	xen_blkbk_barrier(xbt, be, be->blkif->vbd.flush_support);

	err = xenbus_printf(xbt, dev->nodename, "feature-persistent", "%u",
			be->blkif->vbd.feature_gnt_persistent);
	if (err) {
		xenbus_dev_fatal(dev, err, "writing %s/feature-persistent",
				 dev->nodename);
		goto abort;
	}

	err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
			    (unsigned long long)vbd_sz(&be->blkif->vbd));
	if (err) {
		xenbus_dev_fatal(dev, err, "writing %s/sectors",
				 dev->nodename);
		goto abort;
	}

	/* FIXME: use a typename instead */
	err = xenbus_printf(xbt, dev->nodename, "info", "%u",
			    be->blkif->vbd.type |
			    (be->blkif->vbd.readonly ? VDISK_READONLY : 0));
	if (err) {
		xenbus_dev_fatal(dev, err, "writing %s/info",
				 dev->nodename);
		goto abort;
	}
	err = xenbus_printf(xbt, dev->nodename, "sector-size", "%lu",
			    (unsigned long)
			    bdev_logical_block_size(be->blkif->vbd.bdev));
	if (err) {
		xenbus_dev_fatal(dev, err, "writing %s/sector-size",
				 dev->nodename);
		goto abort;
	}
	err = xenbus_printf(xbt, dev->nodename, "physical-sector-size", "%u",
			    bdev_physical_block_size(be->blkif->vbd.bdev));
	if (err)
		xenbus_dev_error(dev, err, "writing %s/physical-sector-size",
				 dev->nodename);

	err = xenbus_transaction_end(xbt, 0);
	if (err == -EAGAIN)
		goto again;
	if (err)
		xenbus_dev_fatal(dev, err, "ending transaction");

	err = xenbus_switch_state(dev, XenbusStateConnected);
	if (err)
		xenbus_dev_fatal(dev, err, "%s: switching to Connected state",
				 dev->nodename);

	return;
 abort:
	xenbus_transaction_end(xbt, 1);
}

/*
 * Each ring may span multiple pages, depending on "ring-page-order".
 */
static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
{
	unsigned int ring_ref[XENBUS_MAX_RING_GRANTS];
	struct pending_req *req, *n;
	int err, i, j;
	struct xen_blkif *blkif = ring->blkif;
	struct xenbus_device *dev = blkif->be->dev;
	unsigned int nr_grefs, evtchn;

	err = xenbus_scanf(XBT_NIL, dir, "event-channel", "%u",
			   &evtchn);
	if (err != 1) {
		err = -EINVAL;
		xenbus_dev_fatal(dev, err, "reading %s/event-channel", dir);
		return err;
	}

	nr_grefs = blkif->nr_ring_pages;

	if (unlikely(!nr_grefs)) {
		WARN_ON(true);
		return -EINVAL;
	}

	for (i = 0; i < nr_grefs; i++) {
		char ring_ref_name[RINGREF_NAME_LEN];

		if (blkif->multi_ref)
			snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref%u", i);
		else {
			WARN_ON(i != 0);
			snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref");
		}

		err = xenbus_scanf(XBT_NIL, dir, ring_ref_name,
				   "%u", &ring_ref[i]);

		if (err != 1) {
			err = -EINVAL;
			xenbus_dev_fatal(dev, err, "reading %s/%s",
					 dir, ring_ref_name);
			return err;
		}
	}

	err = -ENOMEM;
	for (i = 0; i < nr_grefs * XEN_BLKIF_REQS_PER_PAGE; i++) {
		req = kzalloc(sizeof(*req), GFP_KERNEL);
		if (!req)
			goto fail;
		list_add_tail(&req->free_list, &ring->pending_free);
		for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++) {
			req->segments[j] = kzalloc(sizeof(*req->segments[0]), GFP_KERNEL);
			if (!req->segments[j])
				goto fail;
		}
		for (j = 0; j < MAX_INDIRECT_PAGES; j++) {
			req->indirect_pages[j] = kzalloc(sizeof(*req->indirect_pages[0]),
							 GFP_KERNEL);
			if (!req->indirect_pages[j])
				goto fail;
		}
	}

	/* Map the shared frame, irq etc. */
	err = xen_blkif_map(ring, ring_ref, nr_grefs, evtchn);
	if (err) {
		xenbus_dev_fatal(dev, err, "mapping ring-ref port %u", evtchn);
		goto fail;
	}

	return 0;

fail:
	list_for_each_entry_safe(req, n, &ring->pending_free, free_list) {
		list_del(&req->free_list);
		for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++) {
			if (!req->segments[j])
				break;
			kfree(req->segments[j]);
		}
		for (j = 0; j < MAX_INDIRECT_PAGES; j++) {
			if (!req->indirect_pages[j])
				break;
			kfree(req->indirect_pages[j]);
		}
		kfree(req);
	}
	return err;
}

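/*
 * Negotiate the transport parameters with the frontend: protocol,
 * persistent grants, number of queues and ring page order, then read
 * the grant references and event channel for each ring.
 */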
static int connect_ring(struct backend_info *be)
{
	struct xenbus_device *dev = be->dev;
	struct xen_blkif *blkif = be->blkif;
	char protocol[64] = "";
	int err, i;
	char *xspath;
	size_t xspathsize;
	const size_t xenstore_path_ext_size = 11; /* sufficient for "/queue-NNN" */
	unsigned int requested_num_queues = 0;
	unsigned int ring_page_order;

	pr_debug("%s %s\n", __func__, dev->otherend);

	blkif->blk_protocol = BLKIF_PROTOCOL_DEFAULT;
	err = xenbus_scanf(XBT_NIL, dev->otherend, "protocol",
			   "%63s", protocol);
	if (err <= 0)
		strcpy(protocol, "unspecified, assuming default");
	else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_NATIVE))
		blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
	else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_32))
		blkif->blk_protocol = BLKIF_PROTOCOL_X86_32;
	else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_64))
		blkif->blk_protocol = BLKIF_PROTOCOL_X86_64;
	else {
		xenbus_dev_fatal(dev, err, "unknown fe protocol %s", protocol);
		return -ENOSYS;
	}
	if (blkif->vbd.feature_gnt_persistent)
		blkif->vbd.feature_gnt_persistent =
			xenbus_read_unsigned(dev->otherend,
					"feature-persistent", 0);

	blkif->vbd.overflow_max_grants = 0;

	/*
	 * Read the number of hardware queues from frontend.
	 */
	requested_num_queues = xenbus_read_unsigned(dev->otherend,
						    "multi-queue-num-queues",
						    1);
	if (requested_num_queues > xenblk_max_queues
	    || requested_num_queues == 0) {
		/* Buggy or malicious guest. */
		xenbus_dev_fatal(dev, err,
				 "guest requested %u queues, exceeding the maximum of %u.",
				 requested_num_queues, xenblk_max_queues);
		return -ENOSYS;
	}
	blkif->nr_rings = requested_num_queues;
	if (xen_blkif_alloc_rings(blkif))
		return -ENOMEM;

	pr_info("%s: using %d queues, protocol %d (%s) %s\n", dev->nodename,
		 blkif->nr_rings, blkif->blk_protocol, protocol,
		 blkif->vbd.feature_gnt_persistent ? "persistent grants" : "");

	err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-page-order", "%u",
			   &ring_page_order);
	if (err != 1) {
		blkif->nr_ring_pages = 1;
		blkif->multi_ref = false;
	} else if (ring_page_order <= xen_blkif_max_ring_order) {
		blkif->nr_ring_pages = 1 << ring_page_order;
		blkif->multi_ref = true;
	} else {
		err = -EINVAL;
		xenbus_dev_fatal(dev, err,
				 "requested ring page order %d exceeds max: %d",
				 ring_page_order,
				 xen_blkif_max_ring_order);
		return err;
	}

	if (blkif->nr_rings == 1)
		return read_per_ring_refs(&blkif->rings[0], dev->otherend);
	else {
		xspathsize = strlen(dev->otherend) + xenstore_path_ext_size;
		xspath = kmalloc(xspathsize, GFP_KERNEL);
		if (!xspath) {
			xenbus_dev_fatal(dev, -ENOMEM, "reading ring references");
			return -ENOMEM;
		}

		for (i = 0; i < blkif->nr_rings; i++) {
			memset(xspath, 0, xspathsize);
			snprintf(xspath, xspathsize, "%s/queue-%u", dev->otherend, i);
			err = read_per_ring_refs(&blkif->rings[i], xspath);
			if (err) {
				kfree(xspath);
				return err;
			}
		}
		kfree(xspath);
	}
	return 0;
}

static const struct xenbus_device_id xen_blkbk_ids[] = {
	{ "vbd" },
	{ "" }
};

static struct xenbus_driver xen_blkbk_driver = {
	.ids  = xen_blkbk_ids,
	.probe = xen_blkbk_probe,
	.remove = xen_blkbk_remove,
	.otherend_changed = frontend_changed,
	.allow_rebind = true,
	.reclaim_memory = reclaim_memory,
};

int xen_blkif_xenbus_init(void)
{
	return xenbus_register_backend(&xen_blkbk_driver);
}

void xen_blkif_xenbus_fini(void)
{
	xenbus_unregister_driver(&xen_blkbk_driver);
}