1 /******************************************************************************
2  *
3  * Back-end of the driver for virtual block devices. This portion of the
4  * driver exports a 'unified' block-device interface that can be accessed
5  * by any operating system that implements a compatible front end. A
6  * reference front-end implementation can be found in:
7  *  drivers/block/xen-blkfront.c
8  *
9  * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
10  * Copyright (c) 2005, Christopher Clark
11  *
12  * This program is free software; you can redistribute it and/or
13  * modify it under the terms of the GNU General Public License version 2
14  * as published by the Free Software Foundation; or, when distributed
15  * separately from the Linux kernel or incorporated into other
16  * software packages, subject to the following license:
17  *
18  * Permission is hereby granted, free of charge, to any person obtaining a copy
19  * of this source file (the "Software"), to deal in the Software without
20  * restriction, including without limitation the rights to use, copy, modify,
21  * merge, publish, distribute, sublicense, and/or sell copies of the Software,
22  * and to permit persons to whom the Software is furnished to do so, subject to
23  * the following conditions:
24  *
25  * The above copyright notice and this permission notice shall be included in
26  * all copies or substantial portions of the Software.
27  *
28  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
29  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
30  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
31  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
32  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
33  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
34  * IN THE SOFTWARE.
35  */
36 
37 #define pr_fmt(fmt) "xen-blkback: " fmt
38 
39 #include <linux/spinlock.h>
40 #include <linux/kthread.h>
41 #include <linux/list.h>
42 #include <linux/delay.h>
43 #include <linux/freezer.h>
44 #include <linux/bitmap.h>
45 
46 #include <xen/events.h>
47 #include <xen/page.h>
48 #include <xen/xen.h>
49 #include <asm/xen/hypervisor.h>
50 #include <asm/xen/hypercall.h>
51 #include <xen/balloon.h>
52 #include <xen/grant_table.h>
53 #include "common.h"
54 
55 /*
56  * Maximum number of unused free pages to keep in the internal buffer.
57  * Setting this to a low value reduces the memory used by each backend,
58  * but can incur a performance penalty.
59  *
60  * A sane value is xen_blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST, but it
61  * can be set to a lower value at the cost of degraded performance on some
62  * I/O-intensive workloads.
63  */
64 
65 static int xen_blkif_max_buffer_pages = 1024;
66 module_param_named(max_buffer_pages, xen_blkif_max_buffer_pages, int, 0644);
67 MODULE_PARM_DESC(max_buffer_pages,
68 "Maximum number of free pages to keep in each block backend buffer");
69 
70 /*
71  * Maximum number of grants to map persistently in blkback. For maximum
72  * performance this should be the total number of grants that can be used
73  * to fill the ring, but since this might become too high, especially with
74  * the use of indirect descriptors, we set it to a value that provides good
75  * performance without using too much memory.
76  *
77  * When the list of persistent grants is full we clean it up using an LRU
78  * algorithm.
79  */
80 
81 static int xen_blkif_max_pgrants = 1056;
82 module_param_named(max_persistent_grants, xen_blkif_max_pgrants, int, 0644);
83 MODULE_PARM_DESC(max_persistent_grants,
84                  "Maximum number of grants to map persistently");
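/*
 * Note: the limit is enforced per ring in add_persistent_gnt(); once it is
 * reached, vbd.overflow_max_grants is set and further grants are mapped
 * non-persistently until the LRU purge frees up entries again.
 */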
85 
86 /*
87  * How long a persistent grant is allowed to remain allocated without being in
88  * use. The time is in seconds, 0 means indefinitely long.
89  */
90 
91 static unsigned int xen_blkif_pgrant_timeout = 60;
92 module_param_named(persistent_grant_unused_seconds, xen_blkif_pgrant_timeout,
93 		   uint, 0644);
94 MODULE_PARM_DESC(persistent_grant_unused_seconds,
95 		 "Time in seconds an unused persistent grant is allowed to "
96 		 "remain allocated. Default is 60, 0 means unlimited.");
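/*
 * persistent_gnt_timeout() below compares (jiffies - last_used) against
 * HZ * xen_blkif_pgrant_timeout, so a value of 0 disables timeout-based
 * purging of otherwise unused grants.
 */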
97 
98 /*
99  * Maximum number of rings/queues blkback supports; allow as many queues as
100  * there are CPUs if the user has not specified a value.
101  */
102 unsigned int xenblk_max_queues;
103 module_param_named(max_queues, xenblk_max_queues, uint, 0644);
104 MODULE_PARM_DESC(max_queues,
105 		 "Maximum number of hardware queues per virtual disk. " \
106 		 "By default it is the number of online CPUs.");
107 
108 /*
109  * Maximum order of pages to be used for the shared ring between frontend
110  * and backend; 4KB page granularity is used.
111  */
112 unsigned int xen_blkif_max_ring_order = XENBUS_MAX_RING_GRANT_ORDER;
113 module_param_named(max_ring_page_order, xen_blkif_max_ring_order, int, 0444);
114 MODULE_PARM_DESC(max_ring_page_order, "Maximum order of pages to be used for the shared ring");
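/*
 * The order is the log2 of the number of 4KB pages backing the shared ring,
 * e.g. an order of 4 corresponds to 2^4 = 16 ring pages (64 KiB).
 */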
115 /*
116  * The LRU mechanism to clean the lists of persistent grants needs to
117  * be executed periodically. The time interval between consecutive executions
118  * of the purge mechanism is set in ms.
119  */
120 #define LRU_INTERVAL 100
121 
122 /*
123  * When the list of persistent grants is full we will remove unused grants
124  * from the list. This is the percentage of grants to be removed on each
125  * LRU execution.
126  */
127 #define LRU_PERCENT_CLEAN 5
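/*
 * Worked example: with the default xen_blkif_max_pgrants of 1056, each purge
 * pass tries to clean (1056 / 100) * LRU_PERCENT_CLEAN = 50 grants, plus
 * whatever the ring currently holds above the maximum (see
 * purge_persistent_gnt() below).
 */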
128 
129 /* Run-time switchable: /sys/module/xen_blkback/parameters/ */
130 static unsigned int log_stats;
131 module_param(log_stats, int, 0644);
132 
133 #define BLKBACK_INVALID_HANDLE (~0)
134 
135 /* Number of free pages to remove on each call to gnttab_free_pages */
136 #define NUM_BATCH_FREE_PAGES 10
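/*
 * shrink_free_pagepool() drops free_pages_lock and calls gnttab_free_pages()
 * once per batch, so this value bounds how long the lock is held while the
 * pool is being shrunk.
 */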
137 
138 static inline bool persistent_gnt_timeout(struct persistent_gnt *persistent_gnt)
139 {
140 	return xen_blkif_pgrant_timeout &&
141 	       (jiffies - persistent_gnt->last_used >=
142 		HZ * xen_blkif_pgrant_timeout);
143 }
144 
145 static inline int get_free_page(struct xen_blkif_ring *ring, struct page **page)
146 {
147 	unsigned long flags;
148 
149 	spin_lock_irqsave(&ring->free_pages_lock, flags);
150 	if (list_empty(&ring->free_pages)) {
151 		BUG_ON(ring->free_pages_num != 0);
152 		spin_unlock_irqrestore(&ring->free_pages_lock, flags);
153 		return gnttab_alloc_pages(1, page);
154 	}
155 	BUG_ON(ring->free_pages_num == 0);
156 	page[0] = list_first_entry(&ring->free_pages, struct page, lru);
157 	list_del(&page[0]->lru);
158 	ring->free_pages_num--;
159 	spin_unlock_irqrestore(&ring->free_pages_lock, flags);
160 
161 	return 0;
162 }
163 
164 static inline void put_free_pages(struct xen_blkif_ring *ring, struct page **page,
165                                   int num)
166 {
167 	unsigned long flags;
168 	int i;
169 
170 	spin_lock_irqsave(&ring->free_pages_lock, flags);
171 	for (i = 0; i < num; i++)
172 		list_add(&page[i]->lru, &ring->free_pages);
173 	ring->free_pages_num += num;
174 	spin_unlock_irqrestore(&ring->free_pages_lock, flags);
175 }
176 
177 static inline void shrink_free_pagepool(struct xen_blkif_ring *ring, int num)
178 {
179 	/* Remove requested pages in batches of NUM_BATCH_FREE_PAGES */
180 	struct page *page[NUM_BATCH_FREE_PAGES];
181 	unsigned int num_pages = 0;
182 	unsigned long flags;
183 
184 	spin_lock_irqsave(&ring->free_pages_lock, flags);
185 	while (ring->free_pages_num > num) {
186 		BUG_ON(list_empty(&ring->free_pages));
187 		page[num_pages] = list_first_entry(&ring->free_pages,
188 		                                   struct page, lru);
189 		list_del(&page[num_pages]->lru);
190 		ring->free_pages_num--;
191 		if (++num_pages == NUM_BATCH_FREE_PAGES) {
192 			spin_unlock_irqrestore(&ring->free_pages_lock, flags);
193 			gnttab_free_pages(num_pages, page);
194 			spin_lock_irqsave(&ring->free_pages_lock, flags);
195 			num_pages = 0;
196 		}
197 	}
198 	spin_unlock_irqrestore(&ring->free_pages_lock, flags);
199 	if (num_pages != 0)
200 		gnttab_free_pages(num_pages, page);
201 }
202 
203 #define vaddr(page) ((unsigned long)pfn_to_kaddr(page_to_pfn(page)))
204 
205 static int do_block_io_op(struct xen_blkif_ring *ring);
206 static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
207 				struct blkif_request *req,
208 				struct pending_req *pending_req);
209 static void make_response(struct xen_blkif_ring *ring, u64 id,
210 			  unsigned short op, int st);
211 
212 #define foreach_grant_safe(pos, n, rbtree, node) \
213 	for ((pos) = container_of(rb_first((rbtree)), typeof(*(pos)), node), \
214 	     (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL; \
215 	     &(pos)->node != NULL; \
216 	     (pos) = container_of(n, typeof(*(pos)), node), \
217 	     (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL)
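/*
 * foreach_grant_safe() walks an rb_tree of persistent grants while caching
 * the next node in @n, so the loop body may rb_erase() and free @pos (as
 * free_persistent_gnts() and purge_persistent_gnt() do below). A minimal
 * usage sketch, assuming a populated tree:
 *
 *	struct persistent_gnt *gnt;
 *	struct rb_node *n;
 *
 *	foreach_grant_safe(gnt, n, &ring->persistent_gnts, node) {
 *		rb_erase(&gnt->node, &ring->persistent_gnts);
 *		kfree(gnt);
 *	}
 */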
218 
219 
220 /*
221  * We don't need locking around the persistent grant helpers
222  * because blkback uses a single thread for each backend, so we
223  * can be sure that these functions will never be called recursively.
224  *
225  * The only exception to that is put_persistent_gnt, which can be called
226  * from interrupt context (by xen_blkbk_unmap), so we have to use atomic
227  * bit operations to modify the flags of a persistent grant and to count
228  * the number of used grants.
229  */
230 static int add_persistent_gnt(struct xen_blkif_ring *ring,
231 			       struct persistent_gnt *persistent_gnt)
232 {
233 	struct rb_node **new = NULL, *parent = NULL;
234 	struct persistent_gnt *this;
235 	struct xen_blkif *blkif = ring->blkif;
236 
237 	if (ring->persistent_gnt_c >= xen_blkif_max_pgrants) {
238 		if (!blkif->vbd.overflow_max_grants)
239 			blkif->vbd.overflow_max_grants = 1;
240 		return -EBUSY;
241 	}
242 	/* Figure out where to put new node */
243 	new = &ring->persistent_gnts.rb_node;
244 	while (*new) {
245 		this = container_of(*new, struct persistent_gnt, node);
246 
247 		parent = *new;
248 		if (persistent_gnt->gnt < this->gnt)
249 			new = &((*new)->rb_left);
250 		else if (persistent_gnt->gnt > this->gnt)
251 			new = &((*new)->rb_right);
252 		else {
253 			pr_alert_ratelimited("trying to add a gref that's already in the tree\n");
254 			return -EINVAL;
255 		}
256 	}
257 
258 	persistent_gnt->active = true;
259 	/* Add new node and rebalance tree. */
260 	rb_link_node(&(persistent_gnt->node), parent, new);
261 	rb_insert_color(&(persistent_gnt->node), &ring->persistent_gnts);
262 	ring->persistent_gnt_c++;
263 	atomic_inc(&ring->persistent_gnt_in_use);
264 	return 0;
265 }
266 
267 static struct persistent_gnt *get_persistent_gnt(struct xen_blkif_ring *ring,
268 						 grant_ref_t gref)
269 {
270 	struct persistent_gnt *data;
271 	struct rb_node *node = NULL;
272 
273 	node = ring->persistent_gnts.rb_node;
274 	while (node) {
275 		data = container_of(node, struct persistent_gnt, node);
276 
277 		if (gref < data->gnt)
278 			node = node->rb_left;
279 		else if (gref > data->gnt)
280 			node = node->rb_right;
281 		else {
282 			if (data->active) {
283 				pr_alert_ratelimited("requesting a grant already in use\n");
284 				return NULL;
285 			}
286 			data->active = true;
287 			atomic_inc(&ring->persistent_gnt_in_use);
288 			return data;
289 		}
290 	}
291 	return NULL;
292 }
293 
294 static void put_persistent_gnt(struct xen_blkif_ring *ring,
295                                struct persistent_gnt *persistent_gnt)
296 {
297 	if (!persistent_gnt->active)
298 		pr_alert_ratelimited("freeing a grant already unused\n");
299 	persistent_gnt->last_used = jiffies;
300 	persistent_gnt->active = false;
301 	atomic_dec(&ring->persistent_gnt_in_use);
302 }
303 
304 static void free_persistent_gnts(struct xen_blkif_ring *ring, struct rb_root *root,
305                                  unsigned int num)
306 {
307 	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
308 	struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
309 	struct persistent_gnt *persistent_gnt;
310 	struct rb_node *n;
311 	int segs_to_unmap = 0;
312 	struct gntab_unmap_queue_data unmap_data;
313 
314 	unmap_data.pages = pages;
315 	unmap_data.unmap_ops = unmap;
316 	unmap_data.kunmap_ops = NULL;
317 
318 	foreach_grant_safe(persistent_gnt, n, root, node) {
319 		BUG_ON(persistent_gnt->handle ==
320 			BLKBACK_INVALID_HANDLE);
321 		gnttab_set_unmap_op(&unmap[segs_to_unmap],
322 			(unsigned long) pfn_to_kaddr(page_to_pfn(
323 				persistent_gnt->page)),
324 			GNTMAP_host_map,
325 			persistent_gnt->handle);
326 
327 		pages[segs_to_unmap] = persistent_gnt->page;
328 
329 		if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST ||
330 			!rb_next(&persistent_gnt->node)) {
331 
332 			unmap_data.count = segs_to_unmap;
333 			BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
334 
335 			put_free_pages(ring, pages, segs_to_unmap);
336 			segs_to_unmap = 0;
337 		}
338 
339 		rb_erase(&persistent_gnt->node, root);
340 		kfree(persistent_gnt);
341 		num--;
342 	}
343 	BUG_ON(num != 0);
344 }
345 
346 void xen_blkbk_unmap_purged_grants(struct work_struct *work)
347 {
348 	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
349 	struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
350 	struct persistent_gnt *persistent_gnt;
351 	int segs_to_unmap = 0;
352 	struct xen_blkif_ring *ring = container_of(work, typeof(*ring), persistent_purge_work);
353 	struct gntab_unmap_queue_data unmap_data;
354 
355 	unmap_data.pages = pages;
356 	unmap_data.unmap_ops = unmap;
357 	unmap_data.kunmap_ops = NULL;
358 
359 	while (!list_empty(&ring->persistent_purge_list)) {
360 		persistent_gnt = list_first_entry(&ring->persistent_purge_list,
361 		                                  struct persistent_gnt,
362 		                                  remove_node);
363 		list_del(&persistent_gnt->remove_node);
364 
365 		gnttab_set_unmap_op(&unmap[segs_to_unmap],
366 			vaddr(persistent_gnt->page),
367 			GNTMAP_host_map,
368 			persistent_gnt->handle);
369 
370 		pages[segs_to_unmap] = persistent_gnt->page;
371 
372 		if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
373 			unmap_data.count = segs_to_unmap;
374 			BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
375 			put_free_pages(ring, pages, segs_to_unmap);
376 			segs_to_unmap = 0;
377 		}
378 		kfree(persistent_gnt);
379 	}
380 	if (segs_to_unmap > 0) {
381 		unmap_data.count = segs_to_unmap;
382 		BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
383 		put_free_pages(ring, pages, segs_to_unmap);
384 	}
385 }
386 
387 static void purge_persistent_gnt(struct xen_blkif_ring *ring)
388 {
389 	struct persistent_gnt *persistent_gnt;
390 	struct rb_node *n;
391 	unsigned int num_clean, total;
392 	bool scan_used = false;
393 	struct rb_root *root;
394 
395 	if (work_busy(&ring->persistent_purge_work)) {
396 		pr_alert_ratelimited("Scheduled work from previous purge is still busy, cannot purge list\n");
397 		goto out;
398 	}
399 
400 	if (ring->persistent_gnt_c < xen_blkif_max_pgrants ||
401 	    (ring->persistent_gnt_c == xen_blkif_max_pgrants &&
402 	    !ring->blkif->vbd.overflow_max_grants)) {
403 		num_clean = 0;
404 	} else {
405 		num_clean = (xen_blkif_max_pgrants / 100) * LRU_PERCENT_CLEAN;
406 		num_clean = ring->persistent_gnt_c - xen_blkif_max_pgrants +
407 			    num_clean;
408 		num_clean = min(ring->persistent_gnt_c, num_clean);
409 		pr_debug("Going to purge at least %u persistent grants\n",
410 			 num_clean);
411 	}
412 
413 	/*
414 	 * At this point we can be sure that there will be no calls
415 	 * to get_persistent_gnt (because we are executing this code from
416 	 * xen_blkif_schedule), there can only be calls to put_persistent_gnt,
417 	 * which means that the number of currently used grants will go down,
418 	 * but never up, so we will always be able to remove the requested
419 	 * number of grants.
420 	 */
421 
422 	total = 0;
423 
424 	BUG_ON(!list_empty(&ring->persistent_purge_list));
425 	root = &ring->persistent_gnts;
426 purge_list:
427 	foreach_grant_safe(persistent_gnt, n, root, node) {
428 		BUG_ON(persistent_gnt->handle ==
429 			BLKBACK_INVALID_HANDLE);
430 
431 		if (persistent_gnt->active)
432 			continue;
433 		if (!scan_used && !persistent_gnt_timeout(persistent_gnt))
434 			continue;
435 		if (scan_used && total >= num_clean)
436 			continue;
437 
438 		rb_erase(&persistent_gnt->node, root);
439 		list_add(&persistent_gnt->remove_node,
440 			 &ring->persistent_purge_list);
441 		total++;
442 	}
443 	/*
444 	 * Check whether we also need to start cleaning
445 	 * grants that were used since the last purge in order to cope
446 	 * with the requested number.
447 	 */
448 	if (!scan_used && total < num_clean) {
449 		pr_debug("Still missing %u purged frames\n", num_clean - total);
450 		scan_used = true;
451 		goto purge_list;
452 	}
453 
454 	if (total) {
455 		ring->persistent_gnt_c -= total;
456 		ring->blkif->vbd.overflow_max_grants = 0;
457 
458 		/* We can defer this work */
459 		schedule_work(&ring->persistent_purge_work);
460 		pr_debug("Purged %u/%u\n", num_clean, total);
461 	}
462 
463 out:
464 	return;
465 }
466 
467 /*
468  * Retrieve a free pending_req structure from the 'pending_free' list.
469  */
470 static struct pending_req *alloc_req(struct xen_blkif_ring *ring)
471 {
472 	struct pending_req *req = NULL;
473 	unsigned long flags;
474 
475 	spin_lock_irqsave(&ring->pending_free_lock, flags);
476 	if (!list_empty(&ring->pending_free)) {
477 		req = list_entry(ring->pending_free.next, struct pending_req,
478 				 free_list);
479 		list_del(&req->free_list);
480 	}
481 	spin_unlock_irqrestore(&ring->pending_free_lock, flags);
482 	return req;
483 }
484 
485 /*
486  * Return the 'pending_req' structure back to the free pool. We also
487  * wake up the thread if it was waiting for a free pending_req.
488  */
489 static void free_req(struct xen_blkif_ring *ring, struct pending_req *req)
490 {
491 	unsigned long flags;
492 	int was_empty;
493 
494 	spin_lock_irqsave(&ring->pending_free_lock, flags);
495 	was_empty = list_empty(&ring->pending_free);
496 	list_add(&req->free_list, &ring->pending_free);
497 	spin_unlock_irqrestore(&ring->pending_free_lock, flags);
498 	if (was_empty)
499 		wake_up(&ring->pending_free_wq);
500 }
501 
502 /*
503  * Routines for managing virtual block devices (vbds).
504  */
505 static int xen_vbd_translate(struct phys_req *req, struct xen_blkif *blkif,
506 			     int operation)
507 {
508 	struct xen_vbd *vbd = &blkif->vbd;
509 	int rc = -EACCES;
510 
511 	if ((operation != REQ_OP_READ) && vbd->readonly)
512 		goto out;
513 
514 	if (likely(req->nr_sects)) {
515 		blkif_sector_t end = req->sector_number + req->nr_sects;
516 
517 		if (unlikely(end < req->sector_number))
518 			goto out;
519 		if (unlikely(end > vbd_sz(vbd)))
520 			goto out;
521 	}
522 
523 	req->dev  = vbd->pdevice;
524 	req->bdev = vbd->bdev;
525 	rc = 0;
526 
527  out:
528 	return rc;
529 }
530 
531 static void xen_vbd_resize(struct xen_blkif *blkif)
532 {
533 	struct xen_vbd *vbd = &blkif->vbd;
534 	struct xenbus_transaction xbt;
535 	int err;
536 	struct xenbus_device *dev = xen_blkbk_xenbus(blkif->be);
537 	unsigned long long new_size = vbd_sz(vbd);
538 
539 	pr_info("VBD Resize: Domid: %d, Device: (%d, %d)\n",
540 		blkif->domid, MAJOR(vbd->pdevice), MINOR(vbd->pdevice));
541 	pr_info("VBD Resize: new size %llu\n", new_size);
542 	vbd->size = new_size;
543 again:
544 	err = xenbus_transaction_start(&xbt);
545 	if (err) {
546 		pr_warn("Error starting transaction\n");
547 		return;
548 	}
549 	err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
550 			    (unsigned long long)vbd_sz(vbd));
551 	if (err) {
552 		pr_warn("Error writing new size\n");
553 		goto abort;
554 	}
555 	/*
556 	 * Write the current state; we will use this to synchronize
557 	 * the front-end. If the current state is "connected" the
558 	 * front-end will get the new size information online.
559 	 */
560 	err = xenbus_printf(xbt, dev->nodename, "state", "%d", dev->state);
561 	if (err) {
562 		pr_warn("Error writing the state\n");
563 		goto abort;
564 	}
565 
566 	err = xenbus_transaction_end(xbt, 0);
567 	if (err == -EAGAIN)
568 		goto again;
569 	if (err)
570 		pr_warn("Error ending transaction\n");
571 	return;
572 abort:
573 	xenbus_transaction_end(xbt, 1);
574 }
575 
576 /*
577  * Notification from the guest OS.
578  */
579 static void blkif_notify_work(struct xen_blkif_ring *ring)
580 {
581 	ring->waiting_reqs = 1;
582 	wake_up(&ring->wq);
583 }
584 
585 irqreturn_t xen_blkif_be_int(int irq, void *dev_id)
586 {
587 	blkif_notify_work(dev_id);
588 	return IRQ_HANDLED;
589 }
590 
591 /*
592  * SCHEDULER FUNCTIONS
593  */
594 
595 static void print_stats(struct xen_blkif_ring *ring)
596 {
597 	pr_info("(%s): oo %3llu  |  rd %4llu  |  wr %4llu  |  f %4llu"
598 		 "  |  ds %4llu | pg: %4u/%4d\n",
599 		 current->comm, ring->st_oo_req,
600 		 ring->st_rd_req, ring->st_wr_req,
601 		 ring->st_f_req, ring->st_ds_req,
602 		 ring->persistent_gnt_c,
603 		 xen_blkif_max_pgrants);
604 	ring->st_print = jiffies + msecs_to_jiffies(10 * 1000);
605 	ring->st_rd_req = 0;
606 	ring->st_wr_req = 0;
607 	ring->st_oo_req = 0;
608 	ring->st_ds_req = 0;
609 }
610 
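/*
 * Main loop of the per-ring kernel thread: wait (for at most LRU_INTERVAL ms)
 * for the frontend to post requests and for a free pending_req, process the
 * ring via do_block_io_op(), then periodically purge idle persistent grants,
 * shrink the free page pool and, if requested, print stats.
 */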
611 int xen_blkif_schedule(void *arg)
612 {
613 	struct xen_blkif_ring *ring = arg;
614 	struct xen_blkif *blkif = ring->blkif;
615 	struct xen_vbd *vbd = &blkif->vbd;
616 	unsigned long timeout;
617 	int ret;
618 
619 	set_freezable();
620 	while (!kthread_should_stop()) {
621 		if (try_to_freeze())
622 			continue;
623 		if (unlikely(vbd->size != vbd_sz(vbd)))
624 			xen_vbd_resize(blkif);
625 
626 		timeout = msecs_to_jiffies(LRU_INTERVAL);
627 
628 		timeout = wait_event_interruptible_timeout(
629 			ring->wq,
630 			ring->waiting_reqs || kthread_should_stop(),
631 			timeout);
632 		if (timeout == 0)
633 			goto purge_gnt_list;
634 		timeout = wait_event_interruptible_timeout(
635 			ring->pending_free_wq,
636 			!list_empty(&ring->pending_free) ||
637 			kthread_should_stop(),
638 			timeout);
639 		if (timeout == 0)
640 			goto purge_gnt_list;
641 
642 		ring->waiting_reqs = 0;
643 		smp_mb(); /* clear flag *before* checking for work */
644 
645 		ret = do_block_io_op(ring);
646 		if (ret > 0)
647 			ring->waiting_reqs = 1;
648 		if (ret == -EACCES)
649 			wait_event_interruptible(ring->shutdown_wq,
650 						 kthread_should_stop());
651 
652 purge_gnt_list:
653 		if (blkif->vbd.feature_gnt_persistent &&
654 		    time_after(jiffies, ring->next_lru)) {
655 			purge_persistent_gnt(ring);
656 			ring->next_lru = jiffies + msecs_to_jiffies(LRU_INTERVAL);
657 		}
658 
659 		/* Shrink if we have more than xen_blkif_max_buffer_pages */
660 		shrink_free_pagepool(ring, xen_blkif_max_buffer_pages);
661 
662 		if (log_stats && time_after(jiffies, ring->st_print))
663 			print_stats(ring);
664 	}
665 
666 	/* Drain pending purge work */
667 	flush_work(&ring->persistent_purge_work);
668 
669 	if (log_stats)
670 		print_stats(ring);
671 
672 	ring->xenblkd = NULL;
673 
674 	return 0;
675 }
676 
677 /*
678  * Remove persistent grants and empty the pool of free pages
679  */
680 void xen_blkbk_free_caches(struct xen_blkif_ring *ring)
681 {
682 	/* Free all persistent grant pages */
683 	if (!RB_EMPTY_ROOT(&ring->persistent_gnts))
684 		free_persistent_gnts(ring, &ring->persistent_gnts,
685 			ring->persistent_gnt_c);
686 
687 	BUG_ON(!RB_EMPTY_ROOT(&ring->persistent_gnts));
688 	ring->persistent_gnt_c = 0;
689 
690 	/* Since we are shutting down remove all pages from the buffer */
691 	shrink_free_pagepool(ring, 0 /* All */);
692 }
693 
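/*
 * Prepare unmap operations for a batch of grant pages: persistent grants are
 * only released via put_persistent_gnt(), while non-persistent mappings get
 * a gnttab unmap op and have their handle invalidated. Returns the number of
 * unmap ops filled in.
 */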
694 static unsigned int xen_blkbk_unmap_prepare(
695 	struct xen_blkif_ring *ring,
696 	struct grant_page **pages,
697 	unsigned int num,
698 	struct gnttab_unmap_grant_ref *unmap_ops,
699 	struct page **unmap_pages)
700 {
701 	unsigned int i, invcount = 0;
702 
703 	for (i = 0; i < num; i++) {
704 		if (pages[i]->persistent_gnt != NULL) {
705 			put_persistent_gnt(ring, pages[i]->persistent_gnt);
706 			continue;
707 		}
708 		if (pages[i]->handle == BLKBACK_INVALID_HANDLE)
709 			continue;
710 		unmap_pages[invcount] = pages[i]->page;
711 		gnttab_set_unmap_op(&unmap_ops[invcount], vaddr(pages[i]->page),
712 				    GNTMAP_host_map, pages[i]->handle);
713 		pages[i]->handle = BLKBACK_INVALID_HANDLE;
714 		invcount++;
715 	}
716 
717 	return invcount;
718 }
719 
720 static void xen_blkbk_unmap_and_respond_callback(int result, struct gntab_unmap_queue_data *data)
721 {
722 	struct pending_req *pending_req = (struct pending_req *)(data->data);
723 	struct xen_blkif_ring *ring = pending_req->ring;
724 	struct xen_blkif *blkif = ring->blkif;
725 
726 	/* BUG_ON used to reproduce existing behaviour,
727 	 * but is this the best way to deal with this? */
728 	BUG_ON(result);
729 
730 	put_free_pages(ring, data->pages, data->count);
731 	make_response(ring, pending_req->id,
732 		      pending_req->operation, pending_req->status);
733 	free_req(ring, pending_req);
734 	/*
735 	 * Make sure the request is freed before releasing blkif,
736 	 * or there could be a race between free_req and the
737 	 * cleanup done in xen_blkif_free during shutdown.
738 	 *
739 	 * NB: the fact that we might try to wake up pending_free_wq
740 	 * before drain_complete (in case there's a drain going on) is
741 	 * not a problem with our current implementation, because we
742 	 * can be sure there's no thread waiting on pending_free_wq if
743 	 * there's a drain going on; but it has to be taken into
744 	 * account if the current model is changed.
745 	 */
746 	if (atomic_dec_and_test(&ring->inflight) && atomic_read(&blkif->drain)) {
747 		complete(&blkif->drain_complete);
748 	}
749 	xen_blkif_put(blkif);
750 }
751 
752 static void xen_blkbk_unmap_and_respond(struct pending_req *req)
753 {
754 	struct gntab_unmap_queue_data* work = &req->gnttab_unmap_data;
755 	struct xen_blkif_ring *ring = req->ring;
756 	struct grant_page **pages = req->segments;
757 	unsigned int invcount;
758 
759 	invcount = xen_blkbk_unmap_prepare(ring, pages, req->nr_segs,
760 					   req->unmap, req->unmap_pages);
761 
762 	work->data = req;
763 	work->done = xen_blkbk_unmap_and_respond_callback;
764 	work->unmap_ops = req->unmap;
765 	work->kunmap_ops = NULL;
766 	work->pages = req->unmap_pages;
767 	work->count = invcount;
768 
769 	gnttab_unmap_refs_async(&req->gnttab_unmap_data);
770 }
771 
772 
773 /*
774  * Unmap the grant references.
775  *
776  * This could accumulate ops up to the batch size to reduce the number
777  * of hypercalls, but since this is only used in error paths there's
778  * no real need.
779  */
780 static void xen_blkbk_unmap(struct xen_blkif_ring *ring,
781                             struct grant_page *pages[],
782                             int num)
783 {
784 	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
785 	struct page *unmap_pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
786 	unsigned int invcount = 0;
787 	int ret;
788 
789 	while (num) {
790 		unsigned int batch = min(num, BLKIF_MAX_SEGMENTS_PER_REQUEST);
791 
792 		invcount = xen_blkbk_unmap_prepare(ring, pages, batch,
793 						   unmap, unmap_pages);
794 		if (invcount) {
795 			ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, invcount);
796 			BUG_ON(ret);
797 			put_free_pages(ring, unmap_pages, invcount);
798 		}
799 		pages += batch;
800 		num -= batch;
801 	}
802 }
803 
804 static int xen_blkbk_map(struct xen_blkif_ring *ring,
805 			 struct grant_page *pages[],
806 			 int num, bool ro)
807 {
808 	struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
809 	struct page *pages_to_gnt[BLKIF_MAX_SEGMENTS_PER_REQUEST];
810 	struct persistent_gnt *persistent_gnt = NULL;
811 	phys_addr_t addr = 0;
812 	int i, seg_idx, new_map_idx;
813 	int segs_to_map = 0;
814 	int ret = 0;
815 	int last_map = 0, map_until = 0;
816 	int use_persistent_gnts;
817 	struct xen_blkif *blkif = ring->blkif;
818 
819 	use_persistent_gnts = (blkif->vbd.feature_gnt_persistent);
820 
821 	/*
822 	 * Fill out preq.nr_sects with the proper number of sectors, and set
823 	 * up map[..] with the PFN of the page in our domain and the
824 	 * corresponding grant reference for each page.
825 	 */
826 again:
827 	for (i = map_until; i < num; i++) {
828 		uint32_t flags;
829 
830 		if (use_persistent_gnts) {
831 			persistent_gnt = get_persistent_gnt(
832 				ring,
833 				pages[i]->gref);
834 		}
835 
836 		if (persistent_gnt) {
837 			/*
838 			 * We are using persistent grants and
839 			 * the grant is already mapped
840 			 */
841 			pages[i]->page = persistent_gnt->page;
842 			pages[i]->persistent_gnt = persistent_gnt;
843 		} else {
844 			if (get_free_page(ring, &pages[i]->page))
845 				goto out_of_memory;
846 			addr = vaddr(pages[i]->page);
847 			pages_to_gnt[segs_to_map] = pages[i]->page;
848 			pages[i]->persistent_gnt = NULL;
849 			flags = GNTMAP_host_map;
850 			if (!use_persistent_gnts && ro)
851 				flags |= GNTMAP_readonly;
852 			gnttab_set_map_op(&map[segs_to_map++], addr,
853 					  flags, pages[i]->gref,
854 					  blkif->domid);
855 		}
856 		map_until = i + 1;
857 		if (segs_to_map == BLKIF_MAX_SEGMENTS_PER_REQUEST)
858 			break;
859 	}
860 
861 	if (segs_to_map) {
862 		ret = gnttab_map_refs(map, NULL, pages_to_gnt, segs_to_map);
863 		BUG_ON(ret);
864 	}
865 
866 	/*
867 	 * Now swizzle the MFN in our domain with the MFN from the other domain
868 	 * so that when we access vaddr(pending_req,i) it has the contents of
869 	 * the page from the other domain.
870 	 */
871 	for (seg_idx = last_map, new_map_idx = 0; seg_idx < map_until; seg_idx++) {
872 		if (!pages[seg_idx]->persistent_gnt) {
873 			/* This is a newly mapped grant */
874 			BUG_ON(new_map_idx >= segs_to_map);
875 			if (unlikely(map[new_map_idx].status != 0)) {
876 				pr_debug("invalid buffer -- could not remap it\n");
877 				put_free_pages(ring, &pages[seg_idx]->page, 1);
878 				pages[seg_idx]->handle = BLKBACK_INVALID_HANDLE;
879 				ret |= 1;
880 				goto next;
881 			}
882 			pages[seg_idx]->handle = map[new_map_idx].handle;
883 		} else {
884 			continue;
885 		}
886 		if (use_persistent_gnts &&
887 		    ring->persistent_gnt_c < xen_blkif_max_pgrants) {
888 			/*
889 			 * We are using persistent grants, the grant is
890 			 * not mapped but we might have room for it.
891 			 */
892 			persistent_gnt = kmalloc(sizeof(struct persistent_gnt),
893 				                 GFP_KERNEL);
894 			if (!persistent_gnt) {
895 				/*
896 				 * If we don't have enough memory to
897 				 * allocate the persistent_gnt struct,
898 				 * map this grant non-persistently
899 				 */
900 				goto next;
901 			}
902 			persistent_gnt->gnt = map[new_map_idx].ref;
903 			persistent_gnt->handle = map[new_map_idx].handle;
904 			persistent_gnt->page = pages[seg_idx]->page;
905 			if (add_persistent_gnt(ring,
906 			                       persistent_gnt)) {
907 				kfree(persistent_gnt);
908 				persistent_gnt = NULL;
909 				goto next;
910 			}
911 			pages[seg_idx]->persistent_gnt = persistent_gnt;
912 			pr_debug("grant %u added to the tree of persistent grants, using %u/%u\n",
913 				 persistent_gnt->gnt, ring->persistent_gnt_c,
914 				 xen_blkif_max_pgrants);
915 			goto next;
916 		}
917 		if (use_persistent_gnts && !blkif->vbd.overflow_max_grants) {
918 			blkif->vbd.overflow_max_grants = 1;
919 			pr_debug("domain %u, device %#x is using maximum number of persistent grants\n",
920 			         blkif->domid, blkif->vbd.handle);
921 		}
922 		/*
923 		 * We could not map this grant persistently, so use it as
924 		 * a non-persistent grant.
925 		 */
926 next:
927 		new_map_idx++;
928 	}
929 	segs_to_map = 0;
930 	last_map = map_until;
931 	if (map_until != num)
932 		goto again;
933 
934 	return ret;
935 
936 out_of_memory:
937 	pr_alert("%s: out of memory\n", __func__);
938 	put_free_pages(ring, pages_to_gnt, segs_to_map);
939 	return -ENOMEM;
940 }
941 
942 static int xen_blkbk_map_seg(struct pending_req *pending_req)
943 {
944 	int rc;
945 
946 	rc = xen_blkbk_map(pending_req->ring, pending_req->segments,
947 			   pending_req->nr_segs,
948 	                   (pending_req->operation != BLKIF_OP_READ));
949 
950 	return rc;
951 }
952 
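/*
 * An indirect request carries its segments in separate grant-mapped pages
 * instead of in the ring slot itself. Map the indirect pages read-only, copy
 * each segment's gref/first_sect/last_sect out of them, validate the sector
 * range, and unmap the indirect pages again before returning.
 */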
953 static int xen_blkbk_parse_indirect(struct blkif_request *req,
954 				    struct pending_req *pending_req,
955 				    struct seg_buf seg[],
956 				    struct phys_req *preq)
957 {
958 	struct grant_page **pages = pending_req->indirect_pages;
959 	struct xen_blkif_ring *ring = pending_req->ring;
960 	int indirect_grefs, rc, n, nseg, i;
961 	struct blkif_request_segment *segments = NULL;
962 
963 	nseg = pending_req->nr_segs;
964 	indirect_grefs = INDIRECT_PAGES(nseg);
965 	BUG_ON(indirect_grefs > BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST);
966 
967 	for (i = 0; i < indirect_grefs; i++)
968 		pages[i]->gref = req->u.indirect.indirect_grefs[i];
969 
970 	rc = xen_blkbk_map(ring, pages, indirect_grefs, true);
971 	if (rc)
972 		goto unmap;
973 
974 	for (n = 0, i = 0; n < nseg; n++) {
975 		uint8_t first_sect, last_sect;
976 
977 		if ((n % SEGS_PER_INDIRECT_FRAME) == 0) {
978 			/* Map indirect segments */
979 			if (segments)
980 				kunmap_atomic(segments);
981 			segments = kmap_atomic(pages[n/SEGS_PER_INDIRECT_FRAME]->page);
982 		}
983 		i = n % SEGS_PER_INDIRECT_FRAME;
984 
985 		pending_req->segments[n]->gref = segments[i].gref;
986 
987 		first_sect = READ_ONCE(segments[i].first_sect);
988 		last_sect = READ_ONCE(segments[i].last_sect);
989 		if (last_sect >= (XEN_PAGE_SIZE >> 9) || last_sect < first_sect) {
990 			rc = -EINVAL;
991 			goto unmap;
992 		}
993 
994 		seg[n].nsec = last_sect - first_sect + 1;
995 		seg[n].offset = first_sect << 9;
996 		preq->nr_sects += seg[n].nsec;
997 	}
998 
999 unmap:
1000 	if (segments)
1001 		kunmap_atomic(segments);
1002 	xen_blkbk_unmap(ring, pages, indirect_grefs);
1003 	return rc;
1004 }
1005 
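/*
 * BLKIF_OP_DISCARD is handled synchronously here: translate and bound-check
 * the request against the vbd, then call blkdev_issue_discard() (secure when
 * the device supports it and the frontend asked for it) and queue the
 * response before returning.
 */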
1006 static int dispatch_discard_io(struct xen_blkif_ring *ring,
1007 				struct blkif_request *req)
1008 {
1009 	int err = 0;
1010 	int status = BLKIF_RSP_OKAY;
1011 	struct xen_blkif *blkif = ring->blkif;
1012 	struct block_device *bdev = blkif->vbd.bdev;
1013 	unsigned long secure;
1014 	struct phys_req preq;
1015 
1016 	xen_blkif_get(blkif);
1017 
1018 	preq.sector_number = req->u.discard.sector_number;
1019 	preq.nr_sects      = req->u.discard.nr_sectors;
1020 
1021 	err = xen_vbd_translate(&preq, blkif, REQ_OP_WRITE);
1022 	if (err) {
1023 		pr_warn("access denied: DISCARD [%llu->%llu] on dev=%04x\n",
1024 			preq.sector_number,
1025 			preq.sector_number + preq.nr_sects, blkif->vbd.pdevice);
1026 		goto fail_response;
1027 	}
1028 	ring->st_ds_req++;
1029 
1030 	secure = (blkif->vbd.discard_secure &&
1031 		 (req->u.discard.flag & BLKIF_DISCARD_SECURE)) ?
1032 		 BLKDEV_DISCARD_SECURE : 0;
1033 
1034 	err = blkdev_issue_discard(bdev, req->u.discard.sector_number,
1035 				   req->u.discard.nr_sectors,
1036 				   GFP_KERNEL, secure);
1037 fail_response:
1038 	if (err == -EOPNOTSUPP) {
1039 		pr_debug("discard op failed, not supported\n");
1040 		status = BLKIF_RSP_EOPNOTSUPP;
1041 	} else if (err)
1042 		status = BLKIF_RSP_ERROR;
1043 
1044 	make_response(ring, req->u.discard.id, req->operation, status);
1045 	xen_blkif_put(blkif);
1046 	return err;
1047 }
1048 
1049 static int dispatch_other_io(struct xen_blkif_ring *ring,
1050 			     struct blkif_request *req,
1051 			     struct pending_req *pending_req)
1052 {
1053 	free_req(ring, pending_req);
1054 	make_response(ring, req->u.other.id, req->operation,
1055 		      BLKIF_RSP_EOPNOTSUPP);
1056 	return -EIO;
1057 }
1058 
1059 static void xen_blk_drain_io(struct xen_blkif_ring *ring)
1060 {
1061 	struct xen_blkif *blkif = ring->blkif;
1062 
1063 	atomic_set(&blkif->drain, 1);
1064 	do {
1065 		if (atomic_read(&ring->inflight) == 0)
1066 			break;
1067 		wait_for_completion_interruptible_timeout(
1068 				&blkif->drain_complete, HZ);
1069 
1070 		if (!atomic_read(&blkif->drain))
1071 			break;
1072 	} while (!kthread_should_stop());
1073 	atomic_set(&blkif->drain, 0);
1074 }
1075 
1076 static void __end_block_io_op(struct pending_req *pending_req,
1077 		blk_status_t error)
1078 {
1079 	/* An error fails the entire request. */
1080 	if (pending_req->operation == BLKIF_OP_FLUSH_DISKCACHE &&
1081 	    error == BLK_STS_NOTSUPP) {
1082 		pr_debug("flush diskcache op failed, not supported\n");
1083 		xen_blkbk_flush_diskcache(XBT_NIL, pending_req->ring->blkif->be, 0);
1084 		pending_req->status = BLKIF_RSP_EOPNOTSUPP;
1085 	} else if (pending_req->operation == BLKIF_OP_WRITE_BARRIER &&
1086 		   error == BLK_STS_NOTSUPP) {
1087 		pr_debug("write barrier op failed, not supported\n");
1088 		xen_blkbk_barrier(XBT_NIL, pending_req->ring->blkif->be, 0);
1089 		pending_req->status = BLKIF_RSP_EOPNOTSUPP;
1090 	} else if (error) {
1091 		pr_debug("Buffer not up-to-date at end of operation,"
1092 			 " error=%d\n", error);
1093 		pending_req->status = BLKIF_RSP_ERROR;
1094 	}
1095 
1096 	/*
1097 	 * If all of the bio's have completed it is time to unmap
1098 	 * the grant references associated with 'request' and provide
1099 	 * the proper response on the ring.
1100 	 */
1101 	if (atomic_dec_and_test(&pending_req->pendcnt))
1102 		xen_blkbk_unmap_and_respond(pending_req);
1103 }
1104 
1105 /*
1106  * bio callback.
1107  */
1108 static void end_block_io_op(struct bio *bio)
1109 {
1110 	__end_block_io_op(bio->bi_private, bio->bi_status);
1111 	bio_put(bio);
1112 }
1113 
1114 
1115 
1116 /*
1117  * Function to copy a 'struct blkif_request' from the ring buffer
1118  * (which has the sectors we want, the number of them, grant references, etc.)
1119  * and transmute it to the block API to hand it over to the proper block disk.
1120  */
1121 static int
1122 __do_block_io_op(struct xen_blkif_ring *ring)
1123 {
1124 	union blkif_back_rings *blk_rings = &ring->blk_rings;
1125 	struct blkif_request req;
1126 	struct pending_req *pending_req;
1127 	RING_IDX rc, rp;
1128 	int more_to_do = 0;
1129 
1130 	rc = blk_rings->common.req_cons;
1131 	rp = blk_rings->common.sring->req_prod;
1132 	rmb(); /* Ensure we see queued requests up to 'rp'. */
1133 
1134 	if (RING_REQUEST_PROD_OVERFLOW(&blk_rings->common, rp)) {
1135 		rc = blk_rings->common.rsp_prod_pvt;
1136 		pr_warn("Frontend provided bogus ring requests (%d - %d = %d). Halting ring processing on dev=%04x\n",
1137 			rp, rc, rp - rc, ring->blkif->vbd.pdevice);
1138 		return -EACCES;
1139 	}
1140 	while (rc != rp) {
1141 
1142 		if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
1143 			break;
1144 
1145 		if (kthread_should_stop()) {
1146 			more_to_do = 1;
1147 			break;
1148 		}
1149 
1150 		pending_req = alloc_req(ring);
1151 		if (NULL == pending_req) {
1152 			ring->st_oo_req++;
1153 			more_to_do = 1;
1154 			break;
1155 		}
1156 
1157 		switch (ring->blkif->blk_protocol) {
1158 		case BLKIF_PROTOCOL_NATIVE:
1159 			memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc), sizeof(req));
1160 			break;
1161 		case BLKIF_PROTOCOL_X86_32:
1162 			blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc));
1163 			break;
1164 		case BLKIF_PROTOCOL_X86_64:
1165 			blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc));
1166 			break;
1167 		default:
1168 			BUG();
1169 		}
1170 		blk_rings->common.req_cons = ++rc; /* before make_response() */
1171 
1172 		/* Apply all sanity checks to /private copy/ of request. */
1173 		barrier();
1174 
1175 		switch (req.operation) {
1176 		case BLKIF_OP_READ:
1177 		case BLKIF_OP_WRITE:
1178 		case BLKIF_OP_WRITE_BARRIER:
1179 		case BLKIF_OP_FLUSH_DISKCACHE:
1180 		case BLKIF_OP_INDIRECT:
1181 			if (dispatch_rw_block_io(ring, &req, pending_req))
1182 				goto done;
1183 			break;
1184 		case BLKIF_OP_DISCARD:
1185 			free_req(ring, pending_req);
1186 			if (dispatch_discard_io(ring, &req))
1187 				goto done;
1188 			break;
1189 		default:
1190 			if (dispatch_other_io(ring, &req, pending_req))
1191 				goto done;
1192 			break;
1193 		}
1194 
1195 		/* Yield point for this unbounded loop. */
1196 		cond_resched();
1197 	}
1198 done:
1199 	return more_to_do;
1200 }
1201 
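/*
 * Wrapper around __do_block_io_op() that uses RING_FINAL_CHECK_FOR_REQUESTS
 * to re-check the ring after it appears empty, so a request posted by the
 * frontend right as we finish processing is not missed.
 */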
1202 static int
1203 do_block_io_op(struct xen_blkif_ring *ring)
1204 {
1205 	union blkif_back_rings *blk_rings = &ring->blk_rings;
1206 	int more_to_do;
1207 
1208 	do {
1209 		more_to_do = __do_block_io_op(ring);
1210 		if (more_to_do)
1211 			break;
1212 
1213 		RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);
1214 	} while (more_to_do);
1215 
1216 	return more_to_do;
1217 }
1218 /*
1219  * Transmute the 'struct blkif_request' into a proper 'struct bio'
1220  * and call 'submit_bio' to pass it to the underlying storage.
1221  */
1222 static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
1223 				struct blkif_request *req,
1224 				struct pending_req *pending_req)
1225 {
1226 	struct phys_req preq;
1227 	struct seg_buf *seg = pending_req->seg;
1228 	unsigned int nseg;
1229 	struct bio *bio = NULL;
1230 	struct bio **biolist = pending_req->biolist;
1231 	int i, nbio = 0;
1232 	int operation;
1233 	int operation_flags = 0;
1234 	struct blk_plug plug;
1235 	bool drain = false;
1236 	struct grant_page **pages = pending_req->segments;
1237 	unsigned short req_operation;
1238 
1239 	req_operation = req->operation == BLKIF_OP_INDIRECT ?
1240 			req->u.indirect.indirect_op : req->operation;
1241 
1242 	if ((req->operation == BLKIF_OP_INDIRECT) &&
1243 	    (req_operation != BLKIF_OP_READ) &&
1244 	    (req_operation != BLKIF_OP_WRITE)) {
1245 		pr_debug("Invalid indirect operation (%u)\n", req_operation);
1246 		goto fail_response;
1247 	}
1248 
1249 	switch (req_operation) {
1250 	case BLKIF_OP_READ:
1251 		ring->st_rd_req++;
1252 		operation = REQ_OP_READ;
1253 		break;
1254 	case BLKIF_OP_WRITE:
1255 		ring->st_wr_req++;
1256 		operation = REQ_OP_WRITE;
1257 		operation_flags = REQ_SYNC | REQ_IDLE;
1258 		break;
1259 	case BLKIF_OP_WRITE_BARRIER:
1260 		drain = true;
1261 		/* fall through */
1262 	case BLKIF_OP_FLUSH_DISKCACHE:
1263 		ring->st_f_req++;
1264 		operation = REQ_OP_WRITE;
1265 		operation_flags = REQ_PREFLUSH;
1266 		break;
1267 	default:
1268 		operation = 0; /* make gcc happy */
1269 		goto fail_response;
1270 		break;
1271 	}
1272 
1273 	/* Check that the number of segments is sane. */
1274 	nseg = req->operation == BLKIF_OP_INDIRECT ?
1275 	       req->u.indirect.nr_segments : req->u.rw.nr_segments;
1276 
1277 	if (unlikely(nseg == 0 && operation_flags != REQ_PREFLUSH) ||
1278 	    unlikely((req->operation != BLKIF_OP_INDIRECT) &&
1279 		     (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) ||
1280 	    unlikely((req->operation == BLKIF_OP_INDIRECT) &&
1281 		     (nseg > MAX_INDIRECT_SEGMENTS))) {
1282 		pr_debug("Bad number of segments in request (%d)\n", nseg);
1283 		/* Haven't submitted any bio's yet. */
1284 		goto fail_response;
1285 	}
1286 
1287 	preq.nr_sects      = 0;
1288 
1289 	pending_req->ring      = ring;
1290 	pending_req->id        = req->u.rw.id;
1291 	pending_req->operation = req_operation;
1292 	pending_req->status    = BLKIF_RSP_OKAY;
1293 	pending_req->nr_segs   = nseg;
1294 
1295 	if (req->operation != BLKIF_OP_INDIRECT) {
1296 		preq.dev               = req->u.rw.handle;
1297 		preq.sector_number     = req->u.rw.sector_number;
1298 		for (i = 0; i < nseg; i++) {
1299 			pages[i]->gref = req->u.rw.seg[i].gref;
1300 			seg[i].nsec = req->u.rw.seg[i].last_sect -
1301 				req->u.rw.seg[i].first_sect + 1;
1302 			seg[i].offset = (req->u.rw.seg[i].first_sect << 9);
1303 			if ((req->u.rw.seg[i].last_sect >= (XEN_PAGE_SIZE >> 9)) ||
1304 			    (req->u.rw.seg[i].last_sect <
1305 			     req->u.rw.seg[i].first_sect))
1306 				goto fail_response;
1307 			preq.nr_sects += seg[i].nsec;
1308 		}
1309 	} else {
1310 		preq.dev               = req->u.indirect.handle;
1311 		preq.sector_number     = req->u.indirect.sector_number;
1312 		if (xen_blkbk_parse_indirect(req, pending_req, seg, &preq))
1313 			goto fail_response;
1314 	}
1315 
1316 	if (xen_vbd_translate(&preq, ring->blkif, operation) != 0) {
1317 		pr_debug("access denied: %s of [%llu,%llu] on dev=%04x\n",
1318 			 operation == REQ_OP_READ ? "read" : "write",
1319 			 preq.sector_number,
1320 			 preq.sector_number + preq.nr_sects,
1321 			 ring->blkif->vbd.pdevice);
1322 		goto fail_response;
1323 	}
1324 
1325 	/*
1326 	 * This check _MUST_ be done after xen_vbd_translate as the preq.bdev
1327 	 * is set there.
1328 	 */
1329 	for (i = 0; i < nseg; i++) {
1330 		if (((int)preq.sector_number|(int)seg[i].nsec) &
1331 		    ((bdev_logical_block_size(preq.bdev) >> 9) - 1)) {
1332 			pr_debug("Misaligned I/O request from domain %d\n",
1333 				 ring->blkif->domid);
1334 			goto fail_response;
1335 		}
1336 	}
1337 
1338 	/* Wait on all outstanding I/O's and once that has been completed
1339 	 * issue the flush.
1340 	 */
1341 	if (drain)
1342 		xen_blk_drain_io(pending_req->ring);
1343 
1344 	/*
1345 	 * If we have failed at this point, we need to undo the M2P override,
1346 	 * set gnttab_set_unmap_op on all of the grant references and perform
1347 	 * the hypercall to unmap the grants - that is all done in
1348 	 * xen_blkbk_unmap.
1349 	 */
1350 	if (xen_blkbk_map_seg(pending_req))
1351 		goto fail_flush;
1352 
1353 	/*
1354 	 * The corresponding xen_blkif_put is done in __end_block_io_op, or
1355 	 * below (in "!bio") if we are handling a BLKIF_OP_DISCARD.
1356 	 */
1357 	xen_blkif_get(ring->blkif);
1358 	atomic_inc(&ring->inflight);
1359 
1360 	for (i = 0; i < nseg; i++) {
1361 		while ((bio == NULL) ||
1362 		       (bio_add_page(bio,
1363 				     pages[i]->page,
1364 				     seg[i].nsec << 9,
1365 				     seg[i].offset) == 0)) {
1366 
1367 			int nr_iovecs = min_t(int, (nseg-i), BIO_MAX_PAGES);
1368 			bio = bio_alloc(GFP_KERNEL, nr_iovecs);
1369 			if (unlikely(bio == NULL))
1370 				goto fail_put_bio;
1371 
1372 			biolist[nbio++] = bio;
1373 			bio_set_dev(bio, preq.bdev);
1374 			bio->bi_private = pending_req;
1375 			bio->bi_end_io  = end_block_io_op;
1376 			bio->bi_iter.bi_sector  = preq.sector_number;
1377 			bio_set_op_attrs(bio, operation, operation_flags);
1378 		}
1379 
1380 		preq.sector_number += seg[i].nsec;
1381 	}
1382 
1383 	/* This will be hit if the operation was a flush or discard. */
1384 	if (!bio) {
1385 		BUG_ON(operation_flags != REQ_PREFLUSH);
1386 
1387 		bio = bio_alloc(GFP_KERNEL, 0);
1388 		if (unlikely(bio == NULL))
1389 			goto fail_put_bio;
1390 
1391 		biolist[nbio++] = bio;
1392 		bio_set_dev(bio, preq.bdev);
1393 		bio->bi_private = pending_req;
1394 		bio->bi_end_io  = end_block_io_op;
1395 		bio_set_op_attrs(bio, operation, operation_flags);
1396 	}
1397 
1398 	atomic_set(&pending_req->pendcnt, nbio);
1399 	blk_start_plug(&plug);
1400 
1401 	for (i = 0; i < nbio; i++)
1402 		submit_bio(biolist[i]);
1403 
1404 	/* Let the I/Os go.. */
1405 	blk_finish_plug(&plug);
1406 
1407 	if (operation == REQ_OP_READ)
1408 		ring->st_rd_sect += preq.nr_sects;
1409 	else if (operation == REQ_OP_WRITE)
1410 		ring->st_wr_sect += preq.nr_sects;
1411 
1412 	return 0;
1413 
1414  fail_flush:
1415 	xen_blkbk_unmap(ring, pending_req->segments,
1416 	                pending_req->nr_segs);
1417  fail_response:
1418 	/* Haven't submitted any bio's yet. */
1419 	make_response(ring, req->u.rw.id, req_operation, BLKIF_RSP_ERROR);
1420 	free_req(ring, pending_req);
1421 	msleep(1); /* back off a bit */
1422 	return -EIO;
1423 
1424  fail_put_bio:
1425 	for (i = 0; i < nbio; i++)
1426 		bio_put(biolist[i]);
1427 	atomic_set(&pending_req->pendcnt, 1);
1428 	__end_block_io_op(pending_req, BLK_STS_RESOURCE);
1429 	msleep(1); /* back off a bit */
1430 	return -EIO;
1431 }
1432 
1433 
1434 
1435 /*
1436  * Put a response on the ring on how the operation fared.
1437  */
1438 static void make_response(struct xen_blkif_ring *ring, u64 id,
1439 			  unsigned short op, int st)
1440 {
1441 	struct blkif_response *resp;
1442 	unsigned long     flags;
1443 	union blkif_back_rings *blk_rings;
1444 	int notify;
1445 
1446 	spin_lock_irqsave(&ring->blk_ring_lock, flags);
1447 	blk_rings = &ring->blk_rings;
1448 	/* Place on the response ring for the relevant domain. */
1449 	switch (ring->blkif->blk_protocol) {
1450 	case BLKIF_PROTOCOL_NATIVE:
1451 		resp = RING_GET_RESPONSE(&blk_rings->native,
1452 					 blk_rings->native.rsp_prod_pvt);
1453 		break;
1454 	case BLKIF_PROTOCOL_X86_32:
1455 		resp = RING_GET_RESPONSE(&blk_rings->x86_32,
1456 					 blk_rings->x86_32.rsp_prod_pvt);
1457 		break;
1458 	case BLKIF_PROTOCOL_X86_64:
1459 		resp = RING_GET_RESPONSE(&blk_rings->x86_64,
1460 					 blk_rings->x86_64.rsp_prod_pvt);
1461 		break;
1462 	default:
1463 		BUG();
1464 	}
1465 
1466 	resp->id        = id;
1467 	resp->operation = op;
1468 	resp->status    = st;
1469 
1470 	blk_rings->common.rsp_prod_pvt++;
1471 	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
1472 	spin_unlock_irqrestore(&ring->blk_ring_lock, flags);
1473 	if (notify)
1474 		notify_remote_via_irq(ring->irq);
1475 }
1476 
1477 static int __init xen_blkif_init(void)
1478 {
1479 	int rc = 0;
1480 
1481 	if (!xen_domain())
1482 		return -ENODEV;
1483 
1484 	if (xen_blkif_max_ring_order > XENBUS_MAX_RING_GRANT_ORDER) {
1485 		pr_info("Invalid max_ring_order (%d), will use default max: %d.\n",
1486 			xen_blkif_max_ring_order, XENBUS_MAX_RING_GRANT_ORDER);
1487 		xen_blkif_max_ring_order = XENBUS_MAX_RING_GRANT_ORDER;
1488 	}
1489 
1490 	if (xenblk_max_queues == 0)
1491 		xenblk_max_queues = num_online_cpus();
1492 
1493 	rc = xen_blkif_interface_init();
1494 	if (rc)
1495 		goto failed_init;
1496 
1497 	rc = xen_blkif_xenbus_init();
1498 	if (rc)
1499 		goto failed_init;
1500 
1501  failed_init:
1502 	return rc;
1503 }
1504 
1505 module_init(xen_blkif_init);
1506 
1507 MODULE_LICENSE("Dual BSD/GPL");
1508 MODULE_ALIAS("xen-backend:vbd");
1509