
/*
   rbd.c -- Export ceph rados objects as a Linux block device


   based on drivers/block/osdblk.c:

   Copyright 2009 Red Hat, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.



   For usage instructions, please refer to:

                 Documentation/ABI/testing/sysfs-bus-rbd

 */
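
/*
 * Illustrative sketch only -- the ABI document above is authoritative.
 * Mapping and unmapping an image through sysfs typically looks like:
 *
 *   $ echo "1.2.3.4:6789 name=admin,secret=<key> rbd foo" > /sys/bus/rbd/add
 *   $ echo 0 > /sys/bus/rbd/remove
 *
 * The monitor address, credentials, pool ("rbd") and image ("foo")
 * names above are hypothetical placeholders.
 */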

#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/cls_lock_client.h>
#include <linux/ceph/striper.h>
#include <linux/ceph/decode.h>
#include <linux/parser.h>
#include <linux/bsearch.h>

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/blk-mq.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/workqueue.h>

#include "rbd_types.h"

#define RBD_DEBUG	/* Activate rbd_assert() calls */

/*
 * Increment the given counter and return its updated value.
 * If the counter is already 0 it is not incremented.
 * If the counter is already at its maximum value, -EINVAL is
 * returned without updating it.
 */
static int atomic_inc_return_safe(atomic_t *v)
{
	unsigned int counter;

	counter = (unsigned int)atomic_fetch_add_unless(v, 1, 0);
	if (counter <= (unsigned int)INT_MAX)
		return (int)counter;

	atomic_dec(v);

	return -EINVAL;
}

/* Decrement the counter.  Return the resulting value, or -EINVAL */
static int atomic_dec_return_safe(atomic_t *v)
{
	int counter;

	counter = atomic_dec_return(v);
	if (counter >= 0)
		return counter;

	atomic_inc(v);

	return -EINVAL;
}

#define RBD_DRV_NAME "rbd"

#define RBD_MINORS_PER_MAJOR		256
#define RBD_SINGLE_MAJOR_PART_SHIFT	4

#define RBD_MAX_PARENT_CHAIN_LEN	16

#define RBD_SNAP_DEV_NAME_PREFIX	"snap_"
#define RBD_MAX_SNAP_NAME_LEN	\
			(NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))

#define RBD_MAX_SNAP_COUNT	510	/* allows max snapc to fit in 4KB */
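
/*
 * Rough arithmetic behind the limit above (illustrative): 510 snapshot
 * ids at 8 bytes (__le64) each come to 4080 bytes, leaving room for
 * the struct ceph_snap_context header within a 4 KiB allocation.
 */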

#define RBD_SNAP_HEAD_NAME	"-"

#define	BAD_SNAP_INDEX	U32_MAX		/* invalid index into snap array */

/* This allows a single page to hold an image name sent by the OSD */
#define RBD_IMAGE_NAME_LEN_MAX	(PAGE_SIZE - sizeof (__le32) - 1)
#define RBD_IMAGE_ID_LEN_MAX	64

#define RBD_OBJ_PREFIX_LEN_MAX	64

#define RBD_NOTIFY_TIMEOUT	5	/* seconds */
#define RBD_RETRY_DELAY		msecs_to_jiffies(1000)

/* Feature bits */

#define RBD_FEATURE_LAYERING		(1ULL<<0)
#define RBD_FEATURE_STRIPINGV2		(1ULL<<1)
#define RBD_FEATURE_EXCLUSIVE_LOCK	(1ULL<<2)
#define RBD_FEATURE_DATA_POOL		(1ULL<<7)
#define RBD_FEATURE_OPERATIONS		(1ULL<<8)

#define RBD_FEATURES_ALL	(RBD_FEATURE_LAYERING |		\
				 RBD_FEATURE_STRIPINGV2 |	\
				 RBD_FEATURE_EXCLUSIVE_LOCK |	\
				 RBD_FEATURE_DATA_POOL |	\
				 RBD_FEATURE_OPERATIONS)

/* Features supported by this (client software) implementation. */

#define RBD_FEATURES_SUPPORTED	(RBD_FEATURES_ALL)

/*
 * An RBD device name will be "rbd#", where the "rbd" comes from
 * RBD_DRV_NAME above, and # is a unique integer identifier.
 */
#define DEV_NAME_LEN		32

/*
 * block device image metadata (in-memory version)
 */
struct rbd_image_header {
	/* These six fields never change for a given rbd image */
	char *object_prefix;
	__u8 obj_order;
	u64 stripe_unit;
	u64 stripe_count;
	s64 data_pool_id;
	u64 features;		/* Might be changeable someday? */

	/* The remaining fields need to be updated occasionally */
	u64 image_size;
	struct ceph_snap_context *snapc;
	char *snap_names;	/* format 1 only */
	u64 *snap_sizes;	/* format 1 only */
};

/*
 * An rbd image specification.
 *
 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
 * identify an image.  Each rbd_dev structure includes a pointer to
 * an rbd_spec structure that encapsulates this identity.
 *
 * Each of the ids in an rbd_spec has an associated name.  For a
 * user-mapped image, the names are supplied and the ids associated
 * with them are looked up.  For a layered image, a parent image is
 * defined by the tuple, and the names are looked up.
 *
 * An rbd_dev structure contains a parent_spec pointer which is
 * non-null if the image it represents is a child in a layered
 * image.  This pointer will refer to the rbd_spec structure used
 * by the parent rbd_dev for its own identity (i.e., the structure
 * is shared between the parent and child).
 *
 * Since these structures are populated once, during the discovery
 * phase of image construction, they are effectively immutable so
 * we make no effort to synchronize access to them.
 *
 * Note that code herein does not assume the image name is known (it
 * could be a null pointer).
 */
struct rbd_spec {
	u64		pool_id;
	const char	*pool_name;
	const char	*pool_ns;	/* NULL if default, never "" */

	const char	*image_id;
	const char	*image_name;

	u64		snap_id;
	const char	*snap_name;

	struct kref	kref;
};
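
/*
 * Illustrative only: mapping image "foo" in pool "rbd" at its head
 * revision might produce a spec of (pool_id 2, image_id "10074b0dc51d",
 * snap_id CEPH_NOSNAP); every value here is a hypothetical placeholder.
 */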

/*
 * an instance of the client.  multiple devices may share an rbd client.
 */
struct rbd_client {
	struct ceph_client	*client;
	struct kref		kref;
	struct list_head	node;
};

struct rbd_img_request;

enum obj_request_type {
	OBJ_REQUEST_NODATA = 1,
	OBJ_REQUEST_BIO,	/* pointer into provided bio (list) */
	OBJ_REQUEST_BVECS,	/* pointer into provided bio_vec array */
	OBJ_REQUEST_OWN_BVECS,	/* private bio_vec array, doesn't own pages */
};

enum obj_operation_type {
	OBJ_OP_READ = 1,
	OBJ_OP_WRITE,
	OBJ_OP_DISCARD,
};

/*
 * Writes go through the following state machine to deal with
 * layering:
 *
 *                       need copyup
 * RBD_OBJ_WRITE_GUARD ---------------> RBD_OBJ_WRITE_COPYUP
 *        |     ^                              |
 *        v     \------------------------------/
 *      done
 *        ^
 *        |
 * RBD_OBJ_WRITE_FLAT
 *
 * Writes start in RBD_OBJ_WRITE_GUARD or _FLAT, depending on whether
 * there is a parent or not.
 */
enum rbd_obj_write_state {
	RBD_OBJ_WRITE_FLAT = 1,
	RBD_OBJ_WRITE_GUARD,
	RBD_OBJ_WRITE_COPYUP,
};

struct rbd_obj_request {
	struct ceph_object_extent ex;
	union {
		bool			tried_parent;	/* for reads */
		enum rbd_obj_write_state write_state;	/* for writes */
	};

	struct rbd_img_request	*img_request;
	struct ceph_file_extent	*img_extents;
	u32			num_img_extents;

	union {
		struct ceph_bio_iter	bio_pos;
		struct {
			struct ceph_bvec_iter	bvec_pos;
			u32			bvec_count;
			u32			bvec_idx;
		};
	};
	struct bio_vec		*copyup_bvecs;
	u32			copyup_bvec_count;

	struct ceph_osd_request	*osd_req;

	u64			xferred;	/* bytes transferred */
	int			result;

	struct kref		kref;
};

enum img_req_flags {
	IMG_REQ_CHILD,		/* initiator: block = 0, child image = 1 */
	IMG_REQ_LAYERED,	/* ENOENT handling: normal = 0, layered = 1 */
};

struct rbd_img_request {
	struct rbd_device	*rbd_dev;
	enum obj_operation_type	op_type;
	enum obj_request_type	data_type;
	unsigned long		flags;
	union {
		u64			snap_id;	/* for reads */
		struct ceph_snap_context *snapc;	/* for writes */
	};
	union {
		struct request		*rq;		/* block request */
		struct rbd_obj_request	*obj_request;	/* obj req initiator */
	};
	spinlock_t		completion_lock;
	u64			xferred;/* aggregate bytes transferred */
	int			result;	/* first nonzero obj_request result */

	struct list_head	object_extents;	/* obj_req.ex structs */
	u32			obj_request_count;
	u32			pending_count;

	struct kref		kref;
};

#define for_each_obj_request(ireq, oreq) \
	list_for_each_entry(oreq, &(ireq)->object_extents, ex.oe_item)
#define for_each_obj_request_safe(ireq, oreq, n) \
	list_for_each_entry_safe(oreq, n, &(ireq)->object_extents, ex.oe_item)
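
/*
 * Illustrative usage sketch for the helpers above (img_req and obj_req
 * are hypothetical locals):
 *
 *	struct rbd_obj_request *obj_req;
 *
 *	for_each_obj_request(img_req, obj_req)
 *		dout("extent %llu~%llu\n", obj_req->ex.oe_off,
 *		     obj_req->ex.oe_len);
 */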

enum rbd_watch_state {
	RBD_WATCH_STATE_UNREGISTERED,
	RBD_WATCH_STATE_REGISTERED,
	RBD_WATCH_STATE_ERROR,
};

enum rbd_lock_state {
	RBD_LOCK_STATE_UNLOCKED,
	RBD_LOCK_STATE_LOCKED,
	RBD_LOCK_STATE_RELEASING,
};

/* WatchNotify::ClientId */
struct rbd_client_id {
	u64 gid;
	u64 handle;
};

struct rbd_mapping {
	u64                     size;
	u64                     features;
};

/*
 * a single device
 */
struct rbd_device {
	int			dev_id;		/* blkdev unique id */

	int			major;		/* blkdev assigned major */
	int			minor;
	struct gendisk		*disk;		/* blkdev's gendisk and rq */

	u32			image_format;	/* Either 1 or 2 */
	struct rbd_client	*rbd_client;

	char			name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */

	spinlock_t		lock;		/* queue, flags, open_count */

	struct rbd_image_header	header;
	unsigned long		flags;		/* possibly lock protected */
	struct rbd_spec		*spec;
	struct rbd_options	*opts;
	char			*config_info;	/* add{,_single_major} string */

	struct ceph_object_id	header_oid;
	struct ceph_object_locator header_oloc;

	struct ceph_file_layout	layout;		/* used for all rbd requests */

	struct mutex		watch_mutex;
	enum rbd_watch_state	watch_state;
	struct ceph_osd_linger_request *watch_handle;
	u64			watch_cookie;
	struct delayed_work	watch_dwork;

	struct rw_semaphore	lock_rwsem;
	enum rbd_lock_state	lock_state;
	char			lock_cookie[32];
	struct rbd_client_id	owner_cid;
	struct work_struct	acquired_lock_work;
	struct work_struct	released_lock_work;
	struct delayed_work	lock_dwork;
	struct work_struct	unlock_work;
	wait_queue_head_t	lock_waitq;

	struct workqueue_struct	*task_wq;

	struct rbd_spec		*parent_spec;
	u64			parent_overlap;
	atomic_t		parent_ref;
	struct rbd_device	*parent;

	/* Block layer tags. */
	struct blk_mq_tag_set	tag_set;

	/* protects updating the header */
	struct rw_semaphore     header_rwsem;

	struct rbd_mapping	mapping;

	struct list_head	node;

	/* sysfs related */
	struct device		dev;
	unsigned long		open_count;	/* protected by lock */
};

/*
 * Flag bits for rbd_dev->flags:
 * - REMOVING (which is coupled with rbd_dev->open_count) is protected
 *   by rbd_dev->lock
 * - BLACKLISTED is protected by rbd_dev->lock_rwsem
 */
enum rbd_dev_flags {
	RBD_DEV_FLAG_EXISTS,	/* mapped snapshot has not been deleted */
	RBD_DEV_FLAG_REMOVING,	/* this mapping is being removed */
	RBD_DEV_FLAG_BLACKLISTED, /* our ceph_client is blacklisted */
};

static DEFINE_MUTEX(client_mutex);	/* Serialize client creation */

static LIST_HEAD(rbd_dev_list);    /* devices */
static DEFINE_SPINLOCK(rbd_dev_list_lock);

static LIST_HEAD(rbd_client_list);		/* clients */
static DEFINE_SPINLOCK(rbd_client_list_lock);

/* Slab caches for frequently-allocated structures */

static struct kmem_cache	*rbd_img_request_cache;
static struct kmem_cache	*rbd_obj_request_cache;

static int rbd_major;
static DEFINE_IDA(rbd_dev_id_ida);

static struct workqueue_struct *rbd_wq;

/*
 * single-major requires version 0.75 or later of the userspace rbd
 * utility.
 */
static bool single_major = true;
module_param(single_major, bool, 0444);
MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: true)");

static ssize_t rbd_add(struct bus_type *bus, const char *buf,
		       size_t count);
static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
			  size_t count);
static ssize_t rbd_add_single_major(struct bus_type *bus, const char *buf,
				    size_t count);
static ssize_t rbd_remove_single_major(struct bus_type *bus, const char *buf,
				       size_t count);
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth);

static int rbd_dev_id_to_minor(int dev_id)
{
	return dev_id << RBD_SINGLE_MAJOR_PART_SHIFT;
}

static int minor_to_rbd_dev_id(int minor)
{
	return minor >> RBD_SINGLE_MAJOR_PART_SHIFT;
}
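
/*
 * For example (illustrative): with RBD_SINGLE_MAJOR_PART_SHIFT == 4,
 * dev_id 3 maps to minor 48, reserving minors 48-63 for rbd3 and its
 * partitions in single-major mode.
 */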

static bool __rbd_is_lock_owner(struct rbd_device *rbd_dev)
{
	return rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED ||
	       rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING;
}

static bool rbd_is_lock_owner(struct rbd_device *rbd_dev)
{
	bool is_lock_owner;

	down_read(&rbd_dev->lock_rwsem);
	is_lock_owner = __rbd_is_lock_owner(rbd_dev);
	up_read(&rbd_dev->lock_rwsem);
	return is_lock_owner;
}

static ssize_t rbd_supported_features_show(struct bus_type *bus, char *buf)
{
	return sprintf(buf, "0x%llx\n", RBD_FEATURES_SUPPORTED);
}

static BUS_ATTR(add, 0200, NULL, rbd_add);
static BUS_ATTR(remove, 0200, NULL, rbd_remove);
static BUS_ATTR(add_single_major, 0200, NULL, rbd_add_single_major);
static BUS_ATTR(remove_single_major, 0200, NULL, rbd_remove_single_major);
static BUS_ATTR(supported_features, 0444, rbd_supported_features_show, NULL);

static struct attribute *rbd_bus_attrs[] = {
	&bus_attr_add.attr,
	&bus_attr_remove.attr,
	&bus_attr_add_single_major.attr,
	&bus_attr_remove_single_major.attr,
	&bus_attr_supported_features.attr,
	NULL,
};

static umode_t rbd_bus_is_visible(struct kobject *kobj,
				  struct attribute *attr, int index)
{
	if (!single_major &&
	    (attr == &bus_attr_add_single_major.attr ||
	     attr == &bus_attr_remove_single_major.attr))
		return 0;

	return attr->mode;
}

static const struct attribute_group rbd_bus_group = {
	.attrs = rbd_bus_attrs,
	.is_visible = rbd_bus_is_visible,
};
__ATTRIBUTE_GROUPS(rbd_bus);

static struct bus_type rbd_bus_type = {
	.name		= "rbd",
	.bus_groups	= rbd_bus_groups,
};

static void rbd_root_dev_release(struct device *dev)
{
}

static struct device rbd_root_dev = {
	.init_name =    "rbd",
	.release =      rbd_root_dev_release,
};

static __printf(2, 3)
void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	if (!rbd_dev)
		printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
	else if (rbd_dev->disk)
		printk(KERN_WARNING "%s: %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_name)
		printk(KERN_WARNING "%s: image %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_id)
		printk(KERN_WARNING "%s: id %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
	else	/* punt */
		printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
			RBD_DRV_NAME, rbd_dev, &vaf);
	va_end(args);
}

#ifdef RBD_DEBUG
#define rbd_assert(expr)						\
		if (unlikely(!(expr))) {				\
			printk(KERN_ERR "\nAssertion failure in %s() "	\
						"at line %d:\n\n"	\
					"\trbd_assert(%s);\n\n",	\
					__func__, __LINE__, #expr);	\
			BUG();						\
		}
#else /* !RBD_DEBUG */
#  define rbd_assert(expr)	((void) 0)
#endif /* !RBD_DEBUG */

static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);

static int rbd_dev_refresh(struct rbd_device *rbd_dev);
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
static int rbd_dev_header_info(struct rbd_device *rbd_dev);
static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev);
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id);
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u8 *order, u64 *snap_size);
static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
		u64 *snap_features);

static int rbd_open(struct block_device *bdev, fmode_t mode)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	bool removing = false;

	spin_lock_irq(&rbd_dev->lock);
	if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
		removing = true;
	else
		rbd_dev->open_count++;
	spin_unlock_irq(&rbd_dev->lock);
	if (removing)
		return -ENOENT;

	(void) get_device(&rbd_dev->dev);

	return 0;
}

static void rbd_release(struct gendisk *disk, fmode_t mode)
{
	struct rbd_device *rbd_dev = disk->private_data;
	unsigned long open_count_before;

	spin_lock_irq(&rbd_dev->lock);
	open_count_before = rbd_dev->open_count--;
	spin_unlock_irq(&rbd_dev->lock);
	rbd_assert(open_count_before > 0);

	put_device(&rbd_dev->dev);
}

static int rbd_ioctl_set_ro(struct rbd_device *rbd_dev, unsigned long arg)
{
	int ro;

	if (get_user(ro, (int __user *)arg))
		return -EFAULT;

	/* Snapshots can't be marked read-write */
	if (rbd_dev->spec->snap_id != CEPH_NOSNAP && !ro)
		return -EROFS;

	/* Let blkdev_roset() handle it */
	return -ENOTTY;
}

static int rbd_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	int ret;

	switch (cmd) {
	case BLKROSET:
		ret = rbd_ioctl_set_ro(rbd_dev, arg);
		break;
	default:
		ret = -ENOTTY;
	}

	return ret;
}

#ifdef CONFIG_COMPAT
static int rbd_compat_ioctl(struct block_device *bdev, fmode_t mode,
				unsigned int cmd, unsigned long arg)
{
	return rbd_ioctl(bdev, mode, cmd, arg);
}
#endif /* CONFIG_COMPAT */

static const struct block_device_operations rbd_bd_ops = {
	.owner			= THIS_MODULE,
	.open			= rbd_open,
	.release		= rbd_release,
	.ioctl			= rbd_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= rbd_compat_ioctl,
#endif
};

/*
 * Initialize an rbd client instance.  Success or not, this function
 * consumes ceph_opts.  Caller holds client_mutex.
 */
static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;
	int ret = -ENOMEM;

	dout("%s:\n", __func__);
	rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
	if (!rbdc)
		goto out_opt;

	kref_init(&rbdc->kref);
	INIT_LIST_HEAD(&rbdc->node);

	rbdc->client = ceph_create_client(ceph_opts, rbdc);
	if (IS_ERR(rbdc->client))
		goto out_rbdc;
	ceph_opts = NULL; /* Now rbdc->client is responsible for ceph_opts */

	ret = ceph_open_session(rbdc->client);
	if (ret < 0)
		goto out_client;

	spin_lock(&rbd_client_list_lock);
	list_add_tail(&rbdc->node, &rbd_client_list);
	spin_unlock(&rbd_client_list_lock);

	dout("%s: rbdc %p\n", __func__, rbdc);

	return rbdc;
out_client:
	ceph_destroy_client(rbdc->client);
out_rbdc:
	kfree(rbdc);
out_opt:
	if (ceph_opts)
		ceph_destroy_options(ceph_opts);
	dout("%s: error %d\n", __func__, ret);

	return ERR_PTR(ret);
}

static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
{
	kref_get(&rbdc->kref);

	return rbdc;
}

/*
 * Find a ceph client with specific addr and configuration.  If
 * found, bump its reference count.
 */
static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
{
	struct rbd_client *client_node;
	bool found = false;

	if (ceph_opts->flags & CEPH_OPT_NOSHARE)
		return NULL;

	spin_lock(&rbd_client_list_lock);
	list_for_each_entry(client_node, &rbd_client_list, node) {
		if (!ceph_compare_options(ceph_opts, client_node->client)) {
			__rbd_get_client(client_node);

			found = true;
			break;
		}
	}
	spin_unlock(&rbd_client_list_lock);

	return found ? client_node : NULL;
}

/*
 * (Per device) rbd map options
 */
enum {
	Opt_queue_depth,
	Opt_lock_timeout,
	Opt_last_int,
	/* int args above */
	Opt_pool_ns,
	Opt_last_string,
	/* string args above */
	Opt_read_only,
	Opt_read_write,
	Opt_lock_on_read,
	Opt_exclusive,
	Opt_notrim,
	Opt_err
};

static match_table_t rbd_opts_tokens = {
	{Opt_queue_depth, "queue_depth=%d"},
	{Opt_lock_timeout, "lock_timeout=%d"},
	/* int args above */
	{Opt_pool_ns, "_pool_ns=%s"},
	/* string args above */
	{Opt_read_only, "read_only"},
	{Opt_read_only, "ro"},		/* Alternate spelling */
	{Opt_read_write, "read_write"},
	{Opt_read_write, "rw"},		/* Alternate spelling */
	{Opt_lock_on_read, "lock_on_read"},
	{Opt_exclusive, "exclusive"},
	{Opt_notrim, "notrim"},
	{Opt_err, NULL}
};

struct rbd_options {
	int	queue_depth;
	unsigned long	lock_timeout;
	bool	read_only;
	bool	lock_on_read;
	bool	exclusive;
	bool	trim;
};

#define RBD_QUEUE_DEPTH_DEFAULT	BLKDEV_MAX_RQ
#define RBD_LOCK_TIMEOUT_DEFAULT 0  /* no timeout */
#define RBD_READ_ONLY_DEFAULT	false
#define RBD_LOCK_ON_READ_DEFAULT false
#define RBD_EXCLUSIVE_DEFAULT	false
#define RBD_TRIM_DEFAULT	true
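
/*
 * Illustrative only: a map option string combining the tokens above
 * might look like "queue_depth=128,lock_timeout=30,lock_on_read,notrim"
 * (values are hypothetical); each comma-separated token is handed to
 * parse_rbd_opts_token() below.
 */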

struct parse_rbd_opts_ctx {
	struct rbd_spec		*spec;
	struct rbd_options	*opts;
};

static int parse_rbd_opts_token(char *c, void *private)
{
	struct parse_rbd_opts_ctx *pctx = private;
	substring_t argstr[MAX_OPT_ARGS];
	int token, intval, ret;

	token = match_token(c, rbd_opts_tokens, argstr);
	if (token < Opt_last_int) {
		ret = match_int(&argstr[0], &intval);
		if (ret < 0) {
			pr_err("bad option arg (not int) at '%s'\n", c);
			return ret;
		}
		dout("got int token %d val %d\n", token, intval);
	} else if (token > Opt_last_int && token < Opt_last_string) {
		dout("got string token %d val %s\n", token, argstr[0].from);
	} else {
		dout("got token %d\n", token);
	}

	switch (token) {
	case Opt_queue_depth:
		if (intval < 1) {
			pr_err("queue_depth out of range\n");
			return -EINVAL;
		}
		pctx->opts->queue_depth = intval;
		break;
	case Opt_lock_timeout:
		/* 0 is "wait forever" (i.e. infinite timeout) */
		if (intval < 0 || intval > INT_MAX / 1000) {
			pr_err("lock_timeout out of range\n");
			return -EINVAL;
		}
		pctx->opts->lock_timeout = msecs_to_jiffies(intval * 1000);
		break;
	case Opt_pool_ns:
		kfree(pctx->spec->pool_ns);
		pctx->spec->pool_ns = match_strdup(argstr);
		if (!pctx->spec->pool_ns)
			return -ENOMEM;
		break;
	case Opt_read_only:
		pctx->opts->read_only = true;
		break;
	case Opt_read_write:
		pctx->opts->read_only = false;
		break;
	case Opt_lock_on_read:
		pctx->opts->lock_on_read = true;
		break;
	case Opt_exclusive:
		pctx->opts->exclusive = true;
		break;
	case Opt_notrim:
		pctx->opts->trim = false;
		break;
	default:
		/* libceph prints "bad option" msg */
		return -EINVAL;
	}

	return 0;
}

static char* obj_op_name(enum obj_operation_type op_type)
{
	switch (op_type) {
	case OBJ_OP_READ:
		return "read";
	case OBJ_OP_WRITE:
		return "write";
	case OBJ_OP_DISCARD:
		return "discard";
	default:
		return "???";
	}
}

/*
 * Destroy ceph client.  Acquires rbd_client_list_lock itself, so the
 * caller must not hold it.
 */
static void rbd_client_release(struct kref *kref)
{
	struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);

	dout("%s: rbdc %p\n", __func__, rbdc);
	spin_lock(&rbd_client_list_lock);
	list_del(&rbdc->node);
	spin_unlock(&rbd_client_list_lock);

	ceph_destroy_client(rbdc->client);
	kfree(rbdc);
}

/*
 * Drop reference to ceph client node. If it's not referenced anymore, release
 * it.
 */
static void rbd_put_client(struct rbd_client *rbdc)
{
	if (rbdc)
		kref_put(&rbdc->kref, rbd_client_release);
}

static int wait_for_latest_osdmap(struct ceph_client *client)
{
	u64 newest_epoch;
	int ret;

	ret = ceph_monc_get_version(&client->monc, "osdmap", &newest_epoch);
	if (ret)
		return ret;

	if (client->osdc.osdmap->epoch >= newest_epoch)
		return 0;

	ceph_osdc_maybe_request_map(&client->osdc);
	return ceph_monc_wait_osdmap(&client->monc, newest_epoch,
				     client->options->mount_timeout);
}

/*
 * Get a ceph client with specific addr and configuration, if one does
 * not exist create it.  Either way, ceph_opts is consumed by this
 * function.
 */
static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;
	int ret;

	mutex_lock_nested(&client_mutex, SINGLE_DEPTH_NESTING);
	rbdc = rbd_client_find(ceph_opts);
	if (rbdc) {
		ceph_destroy_options(ceph_opts);

		/*
		 * Using an existing client.  Make sure ->pg_pools is up to
		 * date before we look up the pool id in do_rbd_add().
		 */
		ret = wait_for_latest_osdmap(rbdc->client);
		if (ret) {
			rbd_warn(NULL, "failed to get latest osdmap: %d", ret);
			rbd_put_client(rbdc);
			rbdc = ERR_PTR(ret);
		}
	} else {
		rbdc = rbd_client_create(ceph_opts);
	}
	mutex_unlock(&client_mutex);

	return rbdc;
}

static bool rbd_image_format_valid(u32 image_format)
{
	return image_format == 1 || image_format == 2;
}

static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
{
	size_t size;
	u32 snap_count;

	/* The header has to start with the magic rbd header text */
	if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
		return false;

	/* The bio layer requires at least sector-sized I/O */

	if (ondisk->options.order < SECTOR_SHIFT)
		return false;

	/* If we use u64 in a few spots we may be able to loosen this */

	if (ondisk->options.order > 8 * sizeof (int) - 1)
		return false;

	/*
	 * The size of a snapshot header has to fit in a size_t, and
	 * that limits the number of snapshots.
	 */
	snap_count = le32_to_cpu(ondisk->snap_count);
	size = SIZE_MAX - sizeof (struct ceph_snap_context);
	if (snap_count > size / sizeof (__le64))
		return false;

	/*
	 * Not only that, but the size of the entire snapshot
	 * header must also be representable in a size_t.
	 */
	size -= snap_count * sizeof (__le64);
	if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
		return false;

	return true;
}

/*
 * returns the size of an object in the image
 */
static u32 rbd_obj_bytes(struct rbd_image_header *header)
{
	return 1U << header->obj_order;
}

static void rbd_init_layout(struct rbd_device *rbd_dev)
{
	if (rbd_dev->header.stripe_unit == 0 ||
	    rbd_dev->header.stripe_count == 0) {
		rbd_dev->header.stripe_unit = rbd_obj_bytes(&rbd_dev->header);
		rbd_dev->header.stripe_count = 1;
	}

	rbd_dev->layout.stripe_unit = rbd_dev->header.stripe_unit;
	rbd_dev->layout.stripe_count = rbd_dev->header.stripe_count;
	rbd_dev->layout.object_size = rbd_obj_bytes(&rbd_dev->header);
	rbd_dev->layout.pool_id = rbd_dev->header.data_pool_id == CEPH_NOPOOL ?
			  rbd_dev->spec->pool_id : rbd_dev->header.data_pool_id;
	RCU_INIT_POINTER(rbd_dev->layout.pool_ns, NULL);
}

/*
 * Fill an rbd image header with information from the given format 1
 * on-disk header.
 */
static int rbd_header_from_disk(struct rbd_device *rbd_dev,
				 struct rbd_image_header_ondisk *ondisk)
{
	struct rbd_image_header *header = &rbd_dev->header;
	bool first_time = header->object_prefix == NULL;
	struct ceph_snap_context *snapc;
	char *object_prefix = NULL;
	char *snap_names = NULL;
	u64 *snap_sizes = NULL;
	u32 snap_count;
	int ret = -ENOMEM;
	u32 i;

	/* Allocate this now to avoid having to handle failure below */

	if (first_time) {
		object_prefix = kstrndup(ondisk->object_prefix,
					 sizeof(ondisk->object_prefix),
					 GFP_KERNEL);
		if (!object_prefix)
			return -ENOMEM;
	}

	/* Allocate the snapshot context and fill it in */

	snap_count = le32_to_cpu(ondisk->snap_count);
	snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
	if (!snapc)
		goto out_err;
	snapc->seq = le64_to_cpu(ondisk->snap_seq);
	if (snap_count) {
		struct rbd_image_snap_ondisk *snaps;
		u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);

		/* We'll keep a copy of the snapshot names... */

		if (snap_names_len > (u64)SIZE_MAX)
			goto out_2big;
		snap_names = kmalloc(snap_names_len, GFP_KERNEL);
		if (!snap_names)
			goto out_err;

		/* ...as well as the array of their sizes. */
		snap_sizes = kmalloc_array(snap_count,
					   sizeof(*header->snap_sizes),
					   GFP_KERNEL);
		if (!snap_sizes)
			goto out_err;

		/*
		 * Copy the names, and fill in each snapshot's id
		 * and size.
		 *
		 * Note that rbd_dev_v1_header_info() guarantees the
		 * ondisk buffer we're working with has
		 * snap_names_len bytes beyond the end of the
		 * snapshot id array, so this memcpy() is safe.
		 */
		memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
		snaps = ondisk->snaps;
		for (i = 0; i < snap_count; i++) {
			snapc->snaps[i] = le64_to_cpu(snaps[i].id);
			snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
		}
	}

	/* We won't fail any more, fill in the header */

	if (first_time) {
		header->object_prefix = object_prefix;
		header->obj_order = ondisk->options.order;
		rbd_init_layout(rbd_dev);
	} else {
		ceph_put_snap_context(header->snapc);
		kfree(header->snap_names);
		kfree(header->snap_sizes);
	}

	/* The remaining fields always get updated (when we refresh) */

	header->image_size = le64_to_cpu(ondisk->image_size);
	header->snapc = snapc;
	header->snap_names = snap_names;
	header->snap_sizes = snap_sizes;

	return 0;
out_2big:
	ret = -EIO;
out_err:
	kfree(snap_sizes);
	kfree(snap_names);
	ceph_put_snap_context(snapc);
	kfree(object_prefix);

	return ret;
}

static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
{
	const char *snap_name;

	rbd_assert(which < rbd_dev->header.snapc->num_snaps);

	/* Skip over names until we find the one we are looking for */

	snap_name = rbd_dev->header.snap_names;
	while (which--)
		snap_name += strlen(snap_name) + 1;

	return kstrdup(snap_name, GFP_KERNEL);
}

/*
 * Snapshot id comparison function for use with qsort()/bsearch().
 * Note that result is for snapshots in *descending* order.
 */
static int snapid_compare_reverse(const void *s1, const void *s2)
{
	u64 snap_id1 = *(u64 *)s1;
	u64 snap_id2 = *(u64 *)s2;

	if (snap_id1 < snap_id2)
		return 1;
	return snap_id1 == snap_id2 ? 0 : -1;
}

/*
 * Search a snapshot context to see if the given snapshot id is
 * present.
 *
 * Returns the position of the snapshot id in the array if it's found,
 * or BAD_SNAP_INDEX otherwise.
 *
 * Note: The snapshot array is kept sorted (by the osd) in
 * reverse order, highest snapshot id first.
 */
static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	u64 *found;

	found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
				sizeof (snap_id), snapid_compare_reverse);

	return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
}

static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id)
{
	u32 which;
	const char *snap_name;

	which = rbd_dev_snap_index(rbd_dev, snap_id);
	if (which == BAD_SNAP_INDEX)
		return ERR_PTR(-ENOENT);

	snap_name = _rbd_dev_v1_snap_name(rbd_dev, which);
	return snap_name ? snap_name : ERR_PTR(-ENOMEM);
}

static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
{
	if (snap_id == CEPH_NOSNAP)
		return RBD_SNAP_HEAD_NAME;

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (rbd_dev->image_format == 1)
		return rbd_dev_v1_snap_name(rbd_dev, snap_id);

	return rbd_dev_v2_snap_name(rbd_dev, snap_id);
}

static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u64 *snap_size)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_size = rbd_dev->header.image_size;
	} else if (rbd_dev->image_format == 1) {
		u32 which;

		which = rbd_dev_snap_index(rbd_dev, snap_id);
		if (which == BAD_SNAP_INDEX)
			return -ENOENT;

		*snap_size = rbd_dev->header.snap_sizes[which];
	} else {
		u64 size = 0;
		int ret;

		ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
		if (ret)
			return ret;

		*snap_size = size;
	}
	return 0;
}

static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
			u64 *snap_features)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_features = rbd_dev->header.features;
	} else if (rbd_dev->image_format == 1) {
		*snap_features = 0;	/* No features for format 1 */
	} else {
		u64 features = 0;
		int ret;

		ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, &features);
		if (ret)
			return ret;

		*snap_features = features;
	}
	return 0;
}

static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
{
	u64 snap_id = rbd_dev->spec->snap_id;
	u64 size = 0;
	u64 features = 0;
	int ret;

	ret = rbd_snap_size(rbd_dev, snap_id, &size);
	if (ret)
		return ret;
	ret = rbd_snap_features(rbd_dev, snap_id, &features);
	if (ret)
		return ret;

	rbd_dev->mapping.size = size;
	rbd_dev->mapping.features = features;

	return 0;
}

static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
{
	rbd_dev->mapping.size = 0;
	rbd_dev->mapping.features = 0;
}

static void zero_bvec(struct bio_vec *bv)
{
	void *buf;
	unsigned long flags;

	buf = bvec_kmap_irq(bv, &flags);
	memset(buf, 0, bv->bv_len);
	flush_dcache_page(bv->bv_page);
	bvec_kunmap_irq(buf, &flags);
}

static void zero_bios(struct ceph_bio_iter *bio_pos, u32 off, u32 bytes)
{
	struct ceph_bio_iter it = *bio_pos;

	ceph_bio_iter_advance(&it, off);
	ceph_bio_iter_advance_step(&it, bytes, ({
		zero_bvec(&bv);
	}));
}

static void zero_bvecs(struct ceph_bvec_iter *bvec_pos, u32 off, u32 bytes)
{
	struct ceph_bvec_iter it = *bvec_pos;

	ceph_bvec_iter_advance(&it, off);
	ceph_bvec_iter_advance_step(&it, bytes, ({
		zero_bvec(&bv);
	}));
}

/*
 * Zero a range in @obj_req data buffer defined by a bio (list) or
 * (private) bio_vec array.
 *
 * @off is relative to the start of the data buffer.
 */
static void rbd_obj_zero_range(struct rbd_obj_request *obj_req, u32 off,
			       u32 bytes)
{
	switch (obj_req->img_request->data_type) {
	case OBJ_REQUEST_BIO:
		zero_bios(&obj_req->bio_pos, off, bytes);
		break;
	case OBJ_REQUEST_BVECS:
	case OBJ_REQUEST_OWN_BVECS:
		zero_bvecs(&obj_req->bvec_pos, off, bytes);
		break;
	default:
		rbd_assert(0);
	}
}

static void rbd_obj_request_destroy(struct kref *kref);
static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request != NULL);
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		kref_read(&obj_request->kref));
	kref_put(&obj_request->kref, rbd_obj_request_destroy);
}

static void rbd_img_request_get(struct rbd_img_request *img_request)
{
	dout("%s: img %p (was %d)\n", __func__, img_request,
	     kref_read(&img_request->kref));
	kref_get(&img_request->kref);
}

static void rbd_img_request_destroy(struct kref *kref);
static void rbd_img_request_put(struct rbd_img_request *img_request)
{
	rbd_assert(img_request != NULL);
	dout("%s: img %p (was %d)\n", __func__, img_request,
		kref_read(&img_request->kref));
	kref_put(&img_request->kref, rbd_img_request_destroy);
}

static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->img_request == NULL);

	/* Image request now owns object's original reference */
	obj_request->img_request = img_request;
	img_request->obj_request_count++;
	img_request->pending_count++;
	dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
}

static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
	list_del(&obj_request->ex.oe_item);
	rbd_assert(img_request->obj_request_count > 0);
	img_request->obj_request_count--;
	rbd_assert(obj_request->img_request == img_request);
	rbd_obj_request_put(obj_request);
}

static void rbd_obj_request_submit(struct rbd_obj_request *obj_request)
{
	struct ceph_osd_request *osd_req = obj_request->osd_req;

	dout("%s %p object_no %016llx %llu~%llu osd_req %p\n", __func__,
	     obj_request, obj_request->ex.oe_objno, obj_request->ex.oe_off,
	     obj_request->ex.oe_len, osd_req);
	ceph_osdc_start_request(osd_req->r_osdc, osd_req, false);
}

/*
 * The default/initial value for all image request flags is 0.  Each
 * is conditionally set to 1 at image request initialization time
 * and currently never changes thereafter.
 */
static void img_request_layered_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

static void img_request_layered_clear(struct rbd_img_request *img_request)
{
	clear_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

static bool img_request_layered_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
}

static bool rbd_obj_is_entire(struct rbd_obj_request *obj_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;

	return !obj_req->ex.oe_off &&
	       obj_req->ex.oe_len == rbd_dev->layout.object_size;
}

static bool rbd_obj_is_tail(struct rbd_obj_request *obj_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;

	return obj_req->ex.oe_off + obj_req->ex.oe_len ==
					rbd_dev->layout.object_size;
}

static u64 rbd_obj_img_extents_bytes(struct rbd_obj_request *obj_req)
{
	return ceph_file_extents_bytes(obj_req->img_extents,
				       obj_req->num_img_extents);
}

static bool rbd_img_is_write(struct rbd_img_request *img_req)
{
	switch (img_req->op_type) {
	case OBJ_OP_READ:
		return false;
	case OBJ_OP_WRITE:
	case OBJ_OP_DISCARD:
		return true;
	default:
		BUG();
	}
}

static void rbd_obj_handle_request(struct rbd_obj_request *obj_req);

static void rbd_osd_req_callback(struct ceph_osd_request *osd_req)
{
	struct rbd_obj_request *obj_req = osd_req->r_priv;

	dout("%s osd_req %p result %d for obj_req %p\n", __func__, osd_req,
	     osd_req->r_result, obj_req);
	rbd_assert(osd_req == obj_req->osd_req);

	obj_req->result = osd_req->r_result < 0 ? osd_req->r_result : 0;
	if (!obj_req->result && !rbd_img_is_write(obj_req->img_request))
		obj_req->xferred = osd_req->r_result;
	else
		/*
		 * Writes aren't allowed to return a data payload.  In some
		 * guarded write cases (e.g. stat + zero on an empty object)
		 * a stat response makes it through, but we don't care.
		 */
		obj_req->xferred = 0;

	rbd_obj_handle_request(obj_req);
}

static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request)
{
	struct ceph_osd_request *osd_req = obj_request->osd_req;

	osd_req->r_flags = CEPH_OSD_FLAG_READ;
	osd_req->r_snapid = obj_request->img_request->snap_id;
}

static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
{
	struct ceph_osd_request *osd_req = obj_request->osd_req;

	osd_req->r_flags = CEPH_OSD_FLAG_WRITE;
	ktime_get_real_ts64(&osd_req->r_mtime);
	osd_req->r_data_offset = obj_request->ex.oe_off;
}

static struct ceph_osd_request *
rbd_osd_req_create(struct rbd_obj_request *obj_req, unsigned int num_ops)
{
	struct rbd_img_request *img_req = obj_req->img_request;
	struct rbd_device *rbd_dev = img_req->rbd_dev;
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct ceph_osd_request *req;
	const char *name_format = rbd_dev->image_format == 1 ?
				      RBD_V1_DATA_FORMAT : RBD_V2_DATA_FORMAT;

	req = ceph_osdc_alloc_request(osdc,
			(rbd_img_is_write(img_req) ? img_req->snapc : NULL),
			num_ops, false, GFP_NOIO);
	if (!req)
		return NULL;

	req->r_callback = rbd_osd_req_callback;
	req->r_priv = obj_req;

	/*
	 * Data objects may be stored in a separate pool, but always in
	 * the same namespace in that pool as the header in its pool.
	 */
	ceph_oloc_copy(&req->r_base_oloc, &rbd_dev->header_oloc);
	req->r_base_oloc.pool = rbd_dev->layout.pool_id;

	if (ceph_oid_aprintf(&req->r_base_oid, GFP_NOIO, name_format,
			rbd_dev->header.object_prefix, obj_req->ex.oe_objno))
		goto err_req;

	if (ceph_osdc_alloc_messages(req, GFP_NOIO))
		goto err_req;

	return req;

err_req:
	ceph_osdc_put_request(req);
	return NULL;
}

static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
{
	ceph_osdc_put_request(osd_req);
}

static struct rbd_obj_request *rbd_obj_request_create(void)
{
	struct rbd_obj_request *obj_request;

	obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_NOIO);
	if (!obj_request)
		return NULL;

	ceph_object_extent_init(&obj_request->ex);
	kref_init(&obj_request->kref);

	dout("%s %p\n", __func__, obj_request);
	return obj_request;
}

static void rbd_obj_request_destroy(struct kref *kref)
{
	struct rbd_obj_request *obj_request;
	u32 i;

	obj_request = container_of(kref, struct rbd_obj_request, kref);

	dout("%s: obj %p\n", __func__, obj_request);

	if (obj_request->osd_req)
		rbd_osd_req_destroy(obj_request->osd_req);

	switch (obj_request->img_request->data_type) {
	case OBJ_REQUEST_NODATA:
	case OBJ_REQUEST_BIO:
	case OBJ_REQUEST_BVECS:
		break;		/* Nothing to do */
	case OBJ_REQUEST_OWN_BVECS:
		kfree(obj_request->bvec_pos.bvecs);
		break;
	default:
		rbd_assert(0);
	}

	kfree(obj_request->img_extents);
	if (obj_request->copyup_bvecs) {
		for (i = 0; i < obj_request->copyup_bvec_count; i++) {
			if (obj_request->copyup_bvecs[i].bv_page)
				__free_page(obj_request->copyup_bvecs[i].bv_page);
		}
		kfree(obj_request->copyup_bvecs);
	}

	kmem_cache_free(rbd_obj_request_cache, obj_request);
}

/* It's OK to call this for a device with no parent */

static void rbd_spec_put(struct rbd_spec *spec);
static void rbd_dev_unparent(struct rbd_device *rbd_dev)
{
	rbd_dev_remove_parent(rbd_dev);
	rbd_spec_put(rbd_dev->parent_spec);
	rbd_dev->parent_spec = NULL;
	rbd_dev->parent_overlap = 0;
}

/*
 * Parent image reference counting is used to determine when an
 * image's parent fields can be safely torn down--after there are no
 * more in-flight requests to the parent image.  When the last
 * reference is dropped, cleaning them up is safe.
 */
static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
{
	int counter;

	if (!rbd_dev->parent_spec)
		return;

	counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
	if (counter > 0)
		return;

	/* Last reference; clean up parent data structures */

	if (!counter)
		rbd_dev_unparent(rbd_dev);
	else
		rbd_warn(rbd_dev, "parent reference underflow");
}

/*
 * If an image has a non-zero parent overlap, get a reference to its
 * parent.
 *
 * Returns true if the rbd device has a parent with a non-zero
 * overlap and a reference for it was successfully taken, or
 * false otherwise.
 */
static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
{
	int counter = 0;

	if (!rbd_dev->parent_spec)
		return false;

	down_read(&rbd_dev->header_rwsem);
	if (rbd_dev->parent_overlap)
		counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
	up_read(&rbd_dev->header_rwsem);

	if (counter < 0)
		rbd_warn(rbd_dev, "parent reference overflow");

	return counter > 0;
}

/*
 * Caller is responsible for filling in the list of object requests
 * that comprises the image request, and the Linux request pointer
 * (if there is one).
 */
static struct rbd_img_request *rbd_img_request_create(
					struct rbd_device *rbd_dev,
					enum obj_operation_type op_type,
					struct ceph_snap_context *snapc)
{
	struct rbd_img_request *img_request;

	img_request = kmem_cache_zalloc(rbd_img_request_cache, GFP_NOIO);
	if (!img_request)
		return NULL;

	img_request->rbd_dev = rbd_dev;
	img_request->op_type = op_type;
	if (!rbd_img_is_write(img_request))
		img_request->snap_id = rbd_dev->spec->snap_id;
	else
		img_request->snapc = snapc;

	if (rbd_dev_parent_get(rbd_dev))
		img_request_layered_set(img_request);

	spin_lock_init(&img_request->completion_lock);
	INIT_LIST_HEAD(&img_request->object_extents);
	kref_init(&img_request->kref);

	dout("%s: rbd_dev %p %s -> img %p\n", __func__, rbd_dev,
	     obj_op_name(op_type), img_request);
	return img_request;
}

static void rbd_img_request_destroy(struct kref *kref)
{
	struct rbd_img_request *img_request;
	struct rbd_obj_request *obj_request;
	struct rbd_obj_request *next_obj_request;

	img_request = container_of(kref, struct rbd_img_request, kref);

	dout("%s: img %p\n", __func__, img_request);

	for_each_obj_request_safe(img_request, obj_request, next_obj_request)
		rbd_img_obj_request_del(img_request, obj_request);
	rbd_assert(img_request->obj_request_count == 0);

	if (img_request_layered_test(img_request)) {
		img_request_layered_clear(img_request);
		rbd_dev_parent_put(img_request->rbd_dev);
	}

	if (rbd_img_is_write(img_request))
		ceph_put_snap_context(img_request->snapc);

	kmem_cache_free(rbd_img_request_cache, img_request);
}

static void prune_extents(struct ceph_file_extent *img_extents,
			  u32 *num_img_extents, u64 overlap)
{
	u32 cnt = *num_img_extents;

	/* drop extents completely beyond the overlap */
	while (cnt && img_extents[cnt - 1].fe_off >= overlap)
		cnt--;

	if (cnt) {
		struct ceph_file_extent *ex = &img_extents[cnt - 1];

		/* trim final overlapping extent */
		if (ex->fe_off + ex->fe_len > overlap)
			ex->fe_len = overlap - ex->fe_off;
	}

	*num_img_extents = cnt;
}
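
/*
 * Illustrative only: with overlap == 100, the extents [50~30, 90~20,
 * 120~10] become [50~30, 90~10] -- the extent starting at 120 lies
 * entirely beyond the overlap and is dropped, and the one at 90 is
 * trimmed to end at the overlap boundary.
 */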

/*
 * Determine the byte range(s) covered by either just the object extent
 * or the entire object in the parent image.
 */
static int rbd_obj_calc_img_extents(struct rbd_obj_request *obj_req,
				    bool entire)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	int ret;

	if (!rbd_dev->parent_overlap)
		return 0;

	ret = ceph_extent_to_file(&rbd_dev->layout, obj_req->ex.oe_objno,
				  entire ? 0 : obj_req->ex.oe_off,
				  entire ? rbd_dev->layout.object_size :
							obj_req->ex.oe_len,
				  &obj_req->img_extents,
				  &obj_req->num_img_extents);
	if (ret)
		return ret;

	prune_extents(obj_req->img_extents, &obj_req->num_img_extents,
		      rbd_dev->parent_overlap);
	return 0;
}

static void rbd_osd_req_setup_data(struct rbd_obj_request *obj_req, u32 which)
{
	switch (obj_req->img_request->data_type) {
	case OBJ_REQUEST_BIO:
		osd_req_op_extent_osd_data_bio(obj_req->osd_req, which,
					       &obj_req->bio_pos,
					       obj_req->ex.oe_len);
		break;
	case OBJ_REQUEST_BVECS:
	case OBJ_REQUEST_OWN_BVECS:
		rbd_assert(obj_req->bvec_pos.iter.bi_size ==
							obj_req->ex.oe_len);
		rbd_assert(obj_req->bvec_idx == obj_req->bvec_count);
		osd_req_op_extent_osd_data_bvec_pos(obj_req->osd_req, which,
						    &obj_req->bvec_pos);
		break;
	default:
		rbd_assert(0);
	}
}

static int rbd_obj_setup_read(struct rbd_obj_request *obj_req)
{
	obj_req->osd_req = rbd_osd_req_create(obj_req, 1);
	if (!obj_req->osd_req)
		return -ENOMEM;

	osd_req_op_extent_init(obj_req->osd_req, 0, CEPH_OSD_OP_READ,
			       obj_req->ex.oe_off, obj_req->ex.oe_len, 0, 0);
	rbd_osd_req_setup_data(obj_req, 0);

	rbd_osd_req_format_read(obj_req);
	return 0;
}

static int __rbd_obj_setup_stat(struct rbd_obj_request *obj_req,
				unsigned int which)
{
	struct page **pages;

	/*
	 * The response data for a STAT call consists of:
	 *     le64 length;
	 *     struct {
	 *         le32 tv_sec;
	 *         le32 tv_nsec;
	 *     } mtime;
	 */
	pages = ceph_alloc_page_vector(1, GFP_NOIO);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	osd_req_op_init(obj_req->osd_req, which, CEPH_OSD_OP_STAT, 0);
	osd_req_op_raw_data_in_pages(obj_req->osd_req, which, pages,
				     8 + sizeof(struct ceph_timespec),
				     0, false, true);
	return 0;
}

static void __rbd_obj_setup_write(struct rbd_obj_request *obj_req,
				  unsigned int which)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	u16 opcode;

	osd_req_op_alloc_hint_init(obj_req->osd_req, which++,
				   rbd_dev->layout.object_size,
				   rbd_dev->layout.object_size);

	if (rbd_obj_is_entire(obj_req))
		opcode = CEPH_OSD_OP_WRITEFULL;
	else
		opcode = CEPH_OSD_OP_WRITE;

	osd_req_op_extent_init(obj_req->osd_req, which, opcode,
			       obj_req->ex.oe_off, obj_req->ex.oe_len, 0, 0);
	rbd_osd_req_setup_data(obj_req, which++);

	rbd_assert(which == obj_req->osd_req->r_num_ops);
	rbd_osd_req_format_write(obj_req);
}

static int rbd_obj_setup_write(struct rbd_obj_request *obj_req)
{
	unsigned int num_osd_ops, which = 0;
	int ret;

	/* reverse map the entire object onto the parent */
	ret = rbd_obj_calc_img_extents(obj_req, true);
	if (ret)
		return ret;

	if (obj_req->num_img_extents) {
		obj_req->write_state = RBD_OBJ_WRITE_GUARD;
		num_osd_ops = 3; /* stat + setallochint + write/writefull */
	} else {
		obj_req->write_state = RBD_OBJ_WRITE_FLAT;
		num_osd_ops = 2; /* setallochint + write/writefull */
	}

	obj_req->osd_req = rbd_osd_req_create(obj_req, num_osd_ops);
	if (!obj_req->osd_req)
		return -ENOMEM;

	if (obj_req->num_img_extents) {
		ret = __rbd_obj_setup_stat(obj_req, which++);
		if (ret)
			return ret;
	}

	__rbd_obj_setup_write(obj_req, which);
	return 0;
}

static void __rbd_obj_setup_discard(struct rbd_obj_request *obj_req,
				    unsigned int which)
{
	u16 opcode;

	if (rbd_obj_is_entire(obj_req)) {
		if (obj_req->num_img_extents) {
			osd_req_op_init(obj_req->osd_req, which++,
					CEPH_OSD_OP_CREATE, 0);
			opcode = CEPH_OSD_OP_TRUNCATE;
		} else {
			osd_req_op_init(obj_req->osd_req, which++,
					CEPH_OSD_OP_DELETE, 0);
			opcode = 0;
		}
	} else if (rbd_obj_is_tail(obj_req)) {
		opcode = CEPH_OSD_OP_TRUNCATE;
	} else {
		opcode = CEPH_OSD_OP_ZERO;
	}

	if (opcode)
		osd_req_op_extent_init(obj_req->osd_req, which++, opcode,
				       obj_req->ex.oe_off, obj_req->ex.oe_len,
				       0, 0);

	rbd_assert(which == obj_req->osd_req->r_num_ops);
	rbd_osd_req_format_write(obj_req);
}

static int rbd_obj_setup_discard(struct rbd_obj_request *obj_req)
{
	unsigned int num_osd_ops, which = 0;
	int ret;

	/* reverse map the entire object onto the parent */
	ret = rbd_obj_calc_img_extents(obj_req, true);
	if (ret)
		return ret;

	if (rbd_obj_is_entire(obj_req)) {
		obj_req->write_state = RBD_OBJ_WRITE_FLAT;
		if (obj_req->num_img_extents)
			num_osd_ops = 2; /* create + truncate */
		else
			num_osd_ops = 1; /* delete */
	} else {
		if (obj_req->num_img_extents) {
			obj_req->write_state = RBD_OBJ_WRITE_GUARD;
			num_osd_ops = 2; /* stat + truncate/zero */
		} else {
			obj_req->write_state = RBD_OBJ_WRITE_FLAT;
			num_osd_ops = 1; /* truncate/zero */
		}
	}

	obj_req->osd_req = rbd_osd_req_create(obj_req, num_osd_ops);
	if (!obj_req->osd_req)
		return -ENOMEM;

	if (!rbd_obj_is_entire(obj_req) && obj_req->num_img_extents) {
		ret = __rbd_obj_setup_stat(obj_req, which++);
		if (ret)
			return ret;
	}

	__rbd_obj_setup_discard(obj_req, which);
	return 0;
}
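
/*
 * Summary of the discard cases above, derived from the code for quick
 * reference:
 *
 *   object extent       parent data?   OSD ops issued
 *   -------------       ------------   --------------
 *   entire object       yes            create + truncate
 *   entire object       no             delete
 *   tail of object      either         [stat +] truncate
 *   middle of object    either         [stat +] zero
 *
 * The leading stat guard is added only for the non-entire cases with
 * parent data to copy up (RBD_OBJ_WRITE_GUARD).
 */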

/*
 * For each object request in @img_req, allocate an OSD request, add
 * individual OSD ops and prepare them for submission.  The number of
 * OSD ops depends on op_type and the overlap point (if any).
 */
static int __rbd_img_fill_request(struct rbd_img_request *img_req)
{
	struct rbd_obj_request *obj_req;
	int ret;

	for_each_obj_request(img_req, obj_req) {
		switch (img_req->op_type) {
		case OBJ_OP_READ:
			ret = rbd_obj_setup_read(obj_req);
			break;
		case OBJ_OP_WRITE:
			ret = rbd_obj_setup_write(obj_req);
			break;
		case OBJ_OP_DISCARD:
			ret = rbd_obj_setup_discard(obj_req);
			break;
		default:
			rbd_assert(0);
		}
		if (ret)
			return ret;
	}

	return 0;
}

union rbd_img_fill_iter {
	struct ceph_bio_iter	bio_iter;
	struct ceph_bvec_iter	bvec_iter;
};

struct rbd_img_fill_ctx {
	enum obj_request_type	pos_type;
	union rbd_img_fill_iter	*pos;
	union rbd_img_fill_iter	iter;
	ceph_object_extent_fn_t	set_pos_fn;
	ceph_object_extent_fn_t	count_fn;
	ceph_object_extent_fn_t	copy_fn;
};

static struct ceph_object_extent *alloc_object_extent(void *arg)
{
	struct rbd_img_request *img_req = arg;
	struct rbd_obj_request *obj_req;

	obj_req = rbd_obj_request_create();
	if (!obj_req)
		return NULL;

	rbd_img_obj_request_add(img_req, obj_req);
	return &obj_req->ex;
}

/*
 * While su != os && sc == 1 is technically not fancy (it's the same
 * layout as su == os && sc == 1), we can't use the nocopy path for it
 * because ->set_pos_fn() should be called only once per object.
 * ceph_file_to_extents() invokes action_fn once per stripe unit, so
 * treat su != os && sc == 1 as fancy.
 */
static bool rbd_layout_is_fancy(struct ceph_file_layout *l)
{
	return l->stripe_unit != l->object_size;
}
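
/*
 * Example with assumed values, for illustration: object_size = 4M,
 * stripe_unit = 4M, stripe_count = 1 (the default layout) is not fancy
 * and takes the nocopy path.  With object_size = 4M, stripe_unit = 64K
 * the data for one object arrives in 64K stripe-unit chunks, so the
 * bio_vec descriptors must be rearranged into each object request's
 * private array below.
 */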

static int rbd_img_fill_request_nocopy(struct rbd_img_request *img_req,
				       struct ceph_file_extent *img_extents,
				       u32 num_img_extents,
				       struct rbd_img_fill_ctx *fctx)
{
	u32 i;
	int ret;

	img_req->data_type = fctx->pos_type;

	/*
	 * Create object requests and set each object request's starting
	 * position in the provided bio (list) or bio_vec array.
	 */
	fctx->iter = *fctx->pos;
	for (i = 0; i < num_img_extents; i++) {
		ret = ceph_file_to_extents(&img_req->rbd_dev->layout,
					   img_extents[i].fe_off,
					   img_extents[i].fe_len,
					   &img_req->object_extents,
					   alloc_object_extent, img_req,
					   fctx->set_pos_fn, &fctx->iter);
		if (ret)
			return ret;
	}

	return __rbd_img_fill_request(img_req);
}

/*
 * Map a list of image extents to a list of object extents, create the
 * corresponding object requests (normally each to a different object,
 * but not always) and add them to @img_req.  For each object request,
 * set up its data descriptor to point to the corresponding chunk(s) of
 * @fctx->pos data buffer.
 *
 * Because ceph_file_to_extents() will merge adjacent object extents
 * together, each object request's data descriptor may point to multiple
 * different chunks of @fctx->pos data buffer.
 *
 * @fctx->pos data buffer is assumed to be large enough.
 */
static int rbd_img_fill_request(struct rbd_img_request *img_req,
				struct ceph_file_extent *img_extents,
				u32 num_img_extents,
				struct rbd_img_fill_ctx *fctx)
{
	struct rbd_device *rbd_dev = img_req->rbd_dev;
	struct rbd_obj_request *obj_req;
	u32 i;
	int ret;

	if (fctx->pos_type == OBJ_REQUEST_NODATA ||
	    !rbd_layout_is_fancy(&rbd_dev->layout))
		return rbd_img_fill_request_nocopy(img_req, img_extents,
						   num_img_extents, fctx);

	img_req->data_type = OBJ_REQUEST_OWN_BVECS;

	/*
	 * Create object requests and determine ->bvec_count for each object
	 * request.  Note that ->bvec_count sum over all object requests may
	 * be greater than the number of bio_vecs in the provided bio (list)
	 * or bio_vec array because when mapped, those bio_vecs can straddle
	 * stripe unit boundaries.
	 */
	fctx->iter = *fctx->pos;
	for (i = 0; i < num_img_extents; i++) {
		ret = ceph_file_to_extents(&rbd_dev->layout,
					   img_extents[i].fe_off,
					   img_extents[i].fe_len,
					   &img_req->object_extents,
					   alloc_object_extent, img_req,
					   fctx->count_fn, &fctx->iter);
		if (ret)
			return ret;
	}

	for_each_obj_request(img_req, obj_req) {
		obj_req->bvec_pos.bvecs = kmalloc_array(obj_req->bvec_count,
					      sizeof(*obj_req->bvec_pos.bvecs),
					      GFP_NOIO);
		if (!obj_req->bvec_pos.bvecs)
			return -ENOMEM;
	}

	/*
	 * Fill in each object request's private bio_vec array, splitting and
	 * rearranging the provided bio_vecs in stripe unit chunks as needed.
	 */
	fctx->iter = *fctx->pos;
	for (i = 0; i < num_img_extents; i++) {
		ret = ceph_iterate_extents(&rbd_dev->layout,
					   img_extents[i].fe_off,
					   img_extents[i].fe_len,
					   &img_req->object_extents,
					   fctx->copy_fn, &fctx->iter);
		if (ret)
			return ret;
	}

	return __rbd_img_fill_request(img_req);
}

static int rbd_img_fill_nodata(struct rbd_img_request *img_req,
			       u64 off, u64 len)
{
	struct ceph_file_extent ex = { off, len };
	union rbd_img_fill_iter dummy;
	struct rbd_img_fill_ctx fctx = {
		.pos_type = OBJ_REQUEST_NODATA,
		.pos = &dummy,
	};

	return rbd_img_fill_request(img_req, &ex, 1, &fctx);
}

static void set_bio_pos(struct ceph_object_extent *ex, u32 bytes, void *arg)
{
	struct rbd_obj_request *obj_req =
	    container_of(ex, struct rbd_obj_request, ex);
	struct ceph_bio_iter *it = arg;

	dout("%s objno %llu bytes %u\n", __func__, ex->oe_objno, bytes);
	obj_req->bio_pos = *it;
	ceph_bio_iter_advance(it, bytes);
}

static void count_bio_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
{
	struct rbd_obj_request *obj_req =
	    container_of(ex, struct rbd_obj_request, ex);
	struct ceph_bio_iter *it = arg;

	dout("%s objno %llu bytes %u\n", __func__, ex->oe_objno, bytes);
	ceph_bio_iter_advance_step(it, bytes, ({
		obj_req->bvec_count++;
	}));
}

static void copy_bio_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
{
	struct rbd_obj_request *obj_req =
	    container_of(ex, struct rbd_obj_request, ex);
	struct ceph_bio_iter *it = arg;

	dout("%s objno %llu bytes %u\n", __func__, ex->oe_objno, bytes);
	ceph_bio_iter_advance_step(it, bytes, ({
		obj_req->bvec_pos.bvecs[obj_req->bvec_idx++] = bv;
		obj_req->bvec_pos.iter.bi_size += bv.bv_len;
	}));
}

static int __rbd_img_fill_from_bio(struct rbd_img_request *img_req,
				   struct ceph_file_extent *img_extents,
				   u32 num_img_extents,
				   struct ceph_bio_iter *bio_pos)
{
	struct rbd_img_fill_ctx fctx = {
		.pos_type = OBJ_REQUEST_BIO,
		.pos = (union rbd_img_fill_iter *)bio_pos,
		.set_pos_fn = set_bio_pos,
		.count_fn = count_bio_bvecs,
		.copy_fn = copy_bio_bvecs,
	};

	return rbd_img_fill_request(img_req, img_extents, num_img_extents,
				    &fctx);
}

static int rbd_img_fill_from_bio(struct rbd_img_request *img_req,
				 u64 off, u64 len, struct bio *bio)
{
	struct ceph_file_extent ex = { off, len };
	struct ceph_bio_iter it = { .bio = bio, .iter = bio->bi_iter };

	return __rbd_img_fill_from_bio(img_req, &ex, 1, &it);
}

static void set_bvec_pos(struct ceph_object_extent *ex, u32 bytes, void *arg)
{
	struct rbd_obj_request *obj_req =
	    container_of(ex, struct rbd_obj_request, ex);
	struct ceph_bvec_iter *it = arg;

	obj_req->bvec_pos = *it;
	ceph_bvec_iter_shorten(&obj_req->bvec_pos, bytes);
	ceph_bvec_iter_advance(it, bytes);
}

static void count_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
{
	struct rbd_obj_request *obj_req =
	    container_of(ex, struct rbd_obj_request, ex);
	struct ceph_bvec_iter *it = arg;

	ceph_bvec_iter_advance_step(it, bytes, ({
		obj_req->bvec_count++;
	}));
}

static void copy_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
{
	struct rbd_obj_request *obj_req =
	    container_of(ex, struct rbd_obj_request, ex);
	struct ceph_bvec_iter *it = arg;

	ceph_bvec_iter_advance_step(it, bytes, ({
		obj_req->bvec_pos.bvecs[obj_req->bvec_idx++] = bv;
		obj_req->bvec_pos.iter.bi_size += bv.bv_len;
	}));
}

static int __rbd_img_fill_from_bvecs(struct rbd_img_request *img_req,
				     struct ceph_file_extent *img_extents,
				     u32 num_img_extents,
				     struct ceph_bvec_iter *bvec_pos)
{
	struct rbd_img_fill_ctx fctx = {
		.pos_type = OBJ_REQUEST_BVECS,
		.pos = (union rbd_img_fill_iter *)bvec_pos,
		.set_pos_fn = set_bvec_pos,
		.count_fn = count_bvecs,
		.copy_fn = copy_bvecs,
	};

	return rbd_img_fill_request(img_req, img_extents, num_img_extents,
				    &fctx);
}

static int rbd_img_fill_from_bvecs(struct rbd_img_request *img_req,
				   struct ceph_file_extent *img_extents,
				   u32 num_img_extents,
				   struct bio_vec *bvecs)
{
	struct ceph_bvec_iter it = {
		.bvecs = bvecs,
		.iter = { .bi_size = ceph_file_extents_bytes(img_extents,
							     num_img_extents) },
	};

	return __rbd_img_fill_from_bvecs(img_req, img_extents, num_img_extents,
					 &it);
}

static void rbd_img_request_submit(struct rbd_img_request *img_request)
{
	struct rbd_obj_request *obj_request;

	dout("%s: img %p\n", __func__, img_request);

	rbd_img_request_get(img_request);
	for_each_obj_request(img_request, obj_request)
		rbd_obj_request_submit(obj_request);

	rbd_img_request_put(img_request);
}

static int rbd_obj_read_from_parent(struct rbd_obj_request *obj_req)
{
	struct rbd_img_request *img_req = obj_req->img_request;
	struct rbd_img_request *child_img_req;
	int ret;

	child_img_req = rbd_img_request_create(img_req->rbd_dev->parent,
					       OBJ_OP_READ, NULL);
	if (!child_img_req)
		return -ENOMEM;

	__set_bit(IMG_REQ_CHILD, &child_img_req->flags);
	child_img_req->obj_request = obj_req;

	if (!rbd_img_is_write(img_req)) {
		switch (img_req->data_type) {
		case OBJ_REQUEST_BIO:
			ret = __rbd_img_fill_from_bio(child_img_req,
						      obj_req->img_extents,
						      obj_req->num_img_extents,
						      &obj_req->bio_pos);
			break;
		case OBJ_REQUEST_BVECS:
		case OBJ_REQUEST_OWN_BVECS:
			ret = __rbd_img_fill_from_bvecs(child_img_req,
						      obj_req->img_extents,
						      obj_req->num_img_extents,
						      &obj_req->bvec_pos);
			break;
		default:
			rbd_assert(0);
		}
	} else {
		ret = rbd_img_fill_from_bvecs(child_img_req,
					      obj_req->img_extents,
					      obj_req->num_img_extents,
					      obj_req->copyup_bvecs);
	}
	if (ret) {
		rbd_img_request_put(child_img_req);
		return ret;
	}

	rbd_img_request_submit(child_img_req);
	return 0;
}

static bool rbd_obj_handle_read(struct rbd_obj_request *obj_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	int ret;

	if (obj_req->result == -ENOENT &&
	    rbd_dev->parent_overlap && !obj_req->tried_parent) {
		/* reverse map this object extent onto the parent */
		ret = rbd_obj_calc_img_extents(obj_req, false);
		if (ret) {
			obj_req->result = ret;
			return true;
		}

		if (obj_req->num_img_extents) {
			obj_req->tried_parent = true;
			ret = rbd_obj_read_from_parent(obj_req);
			if (ret) {
				obj_req->result = ret;
				return true;
			}
			return false;
		}
	}

	/*
	 * -ENOENT means a hole in the image -- zero-fill the entire
	 * length of the request.  A short read also implies zero-fill
	 * to the end of the request.  In both cases we update xferred
	 * count to indicate the whole request was satisfied.
	 */
	if (obj_req->result == -ENOENT ||
	    (!obj_req->result && obj_req->xferred < obj_req->ex.oe_len)) {
		rbd_assert(!obj_req->xferred || !obj_req->result);
		rbd_obj_zero_range(obj_req, obj_req->xferred,
				   obj_req->ex.oe_len - obj_req->xferred);
		obj_req->result = 0;
		obj_req->xferred = obj_req->ex.oe_len;
	}

	return true;
}
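
/*
 * Read completion flow implemented above (sketch):
 *
 *   object read completes
 *     |-- -ENOENT, parent overlap, not tried yet
 *     |     `-- submit child read from parent; its completion copies
 *     |         result/xferred back via rbd_img_end_child_request()
 *     |         and re-enters this handler with tried_parent set
 *     |-- -ENOENT otherwise --> zero-fill whole extent, result 0
 *     `-- short read --> zero-fill the tail, xferred = oe_len
 */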

/*
 * copyup_bvecs pages are never highmem pages
 */
static bool is_zero_bvecs(struct bio_vec *bvecs, u32 bytes)
{
	struct ceph_bvec_iter it = {
		.bvecs = bvecs,
		.iter = { .bi_size = bytes },
	};

	ceph_bvec_iter_advance_step(&it, bytes, ({
		if (memchr_inv(page_address(bv.bv_page) + bv.bv_offset, 0,
			       bv.bv_len))
			return false;
	}));
	return true;
}

static int rbd_obj_issue_copyup(struct rbd_obj_request *obj_req, u32 bytes)
{
	unsigned int num_osd_ops = obj_req->osd_req->r_num_ops;
	int ret;

	dout("%s obj_req %p bytes %u\n", __func__, obj_req, bytes);
	rbd_assert(obj_req->osd_req->r_ops[0].op == CEPH_OSD_OP_STAT);
	rbd_osd_req_destroy(obj_req->osd_req);

	/*
	 * Create a copyup request with the same number of OSD ops as
	 * the original request.  The original request was stat + op(s),
	 * the new copyup request will be copyup + the same op(s).
	 */
	obj_req->osd_req = rbd_osd_req_create(obj_req, num_osd_ops);
	if (!obj_req->osd_req)
		return -ENOMEM;

	ret = osd_req_op_cls_init(obj_req->osd_req, 0, CEPH_OSD_OP_CALL, "rbd",
				  "copyup");
	if (ret)
		return ret;

	/*
	 * Only send non-zero copyup data to save some I/O and network
	 * bandwidth -- zero copyup data is equivalent to the object not
	 * existing.
	 */
	if (is_zero_bvecs(obj_req->copyup_bvecs, bytes)) {
		dout("%s obj_req %p detected zeroes\n", __func__, obj_req);
		bytes = 0;
	}
	osd_req_op_cls_request_data_bvecs(obj_req->osd_req, 0,
					  obj_req->copyup_bvecs,
					  obj_req->copyup_bvec_count,
					  bytes);

	switch (obj_req->img_request->op_type) {
	case OBJ_OP_WRITE:
		__rbd_obj_setup_write(obj_req, 1);
		break;
	case OBJ_OP_DISCARD:
		rbd_assert(!rbd_obj_is_entire(obj_req));
		__rbd_obj_setup_discard(obj_req, 1);
		break;
	default:
		rbd_assert(0);
	}

	rbd_obj_request_submit(obj_req);
	return 0;
}
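
/*
 * Sketch of the OSD request transformation performed above for a
 * guarded write (the discard case is analogous):
 *
 *   original request:  [ stat | setallochint | write/writefull ]
 *   copyup request:    [ call rbd.copyup | setallochint | write/writefull ]
 *
 * r_num_ops stays the same; only op 0 changes from the existence check
 * to the class method call carrying the parent data.
 */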

static int setup_copyup_bvecs(struct rbd_obj_request *obj_req, u64 obj_overlap)
{
	u32 i;

	rbd_assert(!obj_req->copyup_bvecs);
	obj_req->copyup_bvec_count = calc_pages_for(0, obj_overlap);
	obj_req->copyup_bvecs = kcalloc(obj_req->copyup_bvec_count,
					sizeof(*obj_req->copyup_bvecs),
					GFP_NOIO);
	if (!obj_req->copyup_bvecs)
		return -ENOMEM;

	for (i = 0; i < obj_req->copyup_bvec_count; i++) {
		unsigned int len = min(obj_overlap, (u64)PAGE_SIZE);

		obj_req->copyup_bvecs[i].bv_page = alloc_page(GFP_NOIO);
		if (!obj_req->copyup_bvecs[i].bv_page)
			return -ENOMEM;

		obj_req->copyup_bvecs[i].bv_offset = 0;
		obj_req->copyup_bvecs[i].bv_len = len;
		obj_overlap -= len;
	}

	rbd_assert(!obj_overlap);
	return 0;
}
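
/*
 * Worked example (illustrative): for obj_overlap = 2 * PAGE_SIZE + 100
 * calc_pages_for() yields 3 pages; the loop assigns bv_len values of
 * PAGE_SIZE, PAGE_SIZE and 100 respectively, leaving obj_overlap == 0
 * for the final assertion.
 */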

static int rbd_obj_handle_write_guard(struct rbd_obj_request *obj_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	int ret;

	rbd_assert(obj_req->num_img_extents);
	prune_extents(obj_req->img_extents, &obj_req->num_img_extents,
		      rbd_dev->parent_overlap);
	if (!obj_req->num_img_extents) {
		/*
		 * The overlap has become 0 (most likely because the
		 * image has been flattened).  Use rbd_obj_issue_copyup()
		 * to re-submit the original write request -- the copyup
		 * operation itself will be a no-op, since someone must
		 * have populated the child object while we weren't
		 * looking.  Move to WRITE_FLAT state as we'll be done
		 * with the operation once the null copyup completes.
		 */
		obj_req->write_state = RBD_OBJ_WRITE_FLAT;
		return rbd_obj_issue_copyup(obj_req, 0);
	}

	ret = setup_copyup_bvecs(obj_req, rbd_obj_img_extents_bytes(obj_req));
	if (ret)
		return ret;

	obj_req->write_state = RBD_OBJ_WRITE_COPYUP;
	return rbd_obj_read_from_parent(obj_req);
}

static bool rbd_obj_handle_write(struct rbd_obj_request *obj_req)
{
	int ret;

again:
	switch (obj_req->write_state) {
	case RBD_OBJ_WRITE_GUARD:
		rbd_assert(!obj_req->xferred);
		if (obj_req->result == -ENOENT) {
			/*
			 * The target object doesn't exist.  Read the data for
			 * the entire target object up to the overlap point (if
			 * any) from the parent, so we can use it for a copyup.
			 */
			ret = rbd_obj_handle_write_guard(obj_req);
			if (ret) {
				obj_req->result = ret;
				return true;
			}
			return false;
		}
		/* fall through */
	case RBD_OBJ_WRITE_FLAT:
		if (!obj_req->result)
			/*
			 * There is no such thing as a successful short
			 * write -- indicate the whole request was satisfied.
			 */
			obj_req->xferred = obj_req->ex.oe_len;
		return true;
	case RBD_OBJ_WRITE_COPYUP:
		obj_req->write_state = RBD_OBJ_WRITE_GUARD;
		if (obj_req->result)
			goto again;

		rbd_assert(obj_req->xferred);
		ret = rbd_obj_issue_copyup(obj_req, obj_req->xferred);
		if (ret) {
			obj_req->result = ret;
			return true;
		}
		return false;
	default:
		BUG();
	}
}
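
/*
 * Object write state machine implemented above (sketch):
 *
 *   WRITE_FLAT:   done on any result (no such thing as a short write)
 *   WRITE_GUARD:  -ENOENT from the stat guard -> read the overlapping
 *                 parent data, state becomes WRITE_COPYUP; any other
 *                 result falls through to the WRITE_FLAT handling
 *   WRITE_COPYUP: parent read done -> reissue as copyup + original
 *                 op(s), state back to WRITE_GUARD; a parent read
 *                 error is handled like a final completion
 */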

/*
 * Returns true if @obj_req is completed, or false otherwise.
 */
static bool __rbd_obj_handle_request(struct rbd_obj_request *obj_req)
{
	switch (obj_req->img_request->op_type) {
	case OBJ_OP_READ:
		return rbd_obj_handle_read(obj_req);
	case OBJ_OP_WRITE:
		return rbd_obj_handle_write(obj_req);
	case OBJ_OP_DISCARD:
		if (rbd_obj_handle_write(obj_req)) {
			/*
			 * Hide -ENOENT from delete/truncate/zero -- discarding
			 * a non-existent object is not a problem.
			 */
			if (obj_req->result == -ENOENT) {
				obj_req->result = 0;
				obj_req->xferred = obj_req->ex.oe_len;
			}
			return true;
		}
		return false;
	default:
		BUG();
	}
}

static void rbd_obj_end_request(struct rbd_obj_request *obj_req)
{
	struct rbd_img_request *img_req = obj_req->img_request;

	rbd_assert((!obj_req->result &&
		    obj_req->xferred == obj_req->ex.oe_len) ||
		   (obj_req->result < 0 && !obj_req->xferred));
	if (!obj_req->result) {
		img_req->xferred += obj_req->xferred;
		return;
	}

	rbd_warn(img_req->rbd_dev,
		 "%s at objno %llu %llu~%llu result %d xferred %llu",
		 obj_op_name(img_req->op_type), obj_req->ex.oe_objno,
		 obj_req->ex.oe_off, obj_req->ex.oe_len, obj_req->result,
		 obj_req->xferred);
	if (!img_req->result) {
		img_req->result = obj_req->result;
		img_req->xferred = 0;
	}
}

static void rbd_img_end_child_request(struct rbd_img_request *img_req)
{
	struct rbd_obj_request *obj_req = img_req->obj_request;

	rbd_assert(test_bit(IMG_REQ_CHILD, &img_req->flags));
	rbd_assert((!img_req->result &&
		    img_req->xferred == rbd_obj_img_extents_bytes(obj_req)) ||
		   (img_req->result < 0 && !img_req->xferred));

	obj_req->result = img_req->result;
	obj_req->xferred = img_req->xferred;
	rbd_img_request_put(img_req);
}

static void rbd_img_end_request(struct rbd_img_request *img_req)
{
	rbd_assert(!test_bit(IMG_REQ_CHILD, &img_req->flags));
	rbd_assert((!img_req->result &&
		    img_req->xferred == blk_rq_bytes(img_req->rq)) ||
		   (img_req->result < 0 && !img_req->xferred));

	blk_mq_end_request(img_req->rq,
			   errno_to_blk_status(img_req->result));
	rbd_img_request_put(img_req);
}

static void rbd_obj_handle_request(struct rbd_obj_request *obj_req)
{
	struct rbd_img_request *img_req;

again:
	if (!__rbd_obj_handle_request(obj_req))
		return;

	img_req = obj_req->img_request;
	spin_lock(&img_req->completion_lock);
	rbd_obj_end_request(obj_req);
	rbd_assert(img_req->pending_count);
	if (--img_req->pending_count) {
		spin_unlock(&img_req->completion_lock);
		return;
	}

	spin_unlock(&img_req->completion_lock);
	if (test_bit(IMG_REQ_CHILD, &img_req->flags)) {
		obj_req = img_req->obj_request;
		rbd_img_end_child_request(img_req);
		goto again;
	}
	rbd_img_end_request(img_req);
}

static const struct rbd_client_id rbd_empty_cid;

static bool rbd_cid_equal(const struct rbd_client_id *lhs,
			  const struct rbd_client_id *rhs)
{
	return lhs->gid == rhs->gid && lhs->handle == rhs->handle;
}

static struct rbd_client_id rbd_get_cid(struct rbd_device *rbd_dev)
{
	struct rbd_client_id cid;

	mutex_lock(&rbd_dev->watch_mutex);
	cid.gid = ceph_client_gid(rbd_dev->rbd_client->client);
	cid.handle = rbd_dev->watch_cookie;
	mutex_unlock(&rbd_dev->watch_mutex);
	return cid;
}

/*
 * lock_rwsem must be held for write
 */
static void rbd_set_owner_cid(struct rbd_device *rbd_dev,
			      const struct rbd_client_id *cid)
{
	dout("%s rbd_dev %p %llu-%llu -> %llu-%llu\n", __func__, rbd_dev,
	     rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle,
	     cid->gid, cid->handle);
	rbd_dev->owner_cid = *cid; /* struct */
}

static void format_lock_cookie(struct rbd_device *rbd_dev, char *buf)
{
	mutex_lock(&rbd_dev->watch_mutex);
	sprintf(buf, "%s %llu", RBD_LOCK_COOKIE_PREFIX, rbd_dev->watch_cookie);
	mutex_unlock(&rbd_dev->watch_mutex);
}
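
/*
 * Assuming RBD_LOCK_COOKIE_PREFIX is "auto" (defined earlier in this
 * file), the resulting cookie looks like "auto 18446462598732840961":
 * the prefix followed by the watch cookie (linger id), which
 * find_watcher() parses back with sscanf().  The numeric value here is
 * illustrative only.
 */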

static void __rbd_lock(struct rbd_device *rbd_dev, const char *cookie)
{
	struct rbd_client_id cid = rbd_get_cid(rbd_dev);

	strcpy(rbd_dev->lock_cookie, cookie);
	rbd_set_owner_cid(rbd_dev, &cid);
	queue_work(rbd_dev->task_wq, &rbd_dev->acquired_lock_work);
}

/*
 * lock_rwsem must be held for write
 */
static int rbd_lock(struct rbd_device *rbd_dev)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	char cookie[32];
	int ret;

	WARN_ON(__rbd_is_lock_owner(rbd_dev) ||
		rbd_dev->lock_cookie[0] != '\0');

	format_lock_cookie(rbd_dev, cookie);
	ret = ceph_cls_lock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
			    RBD_LOCK_NAME, CEPH_CLS_LOCK_EXCLUSIVE, cookie,
			    RBD_LOCK_TAG, "", 0);
	if (ret)
		return ret;

	rbd_dev->lock_state = RBD_LOCK_STATE_LOCKED;
	__rbd_lock(rbd_dev, cookie);
	return 0;
}

/*
 * lock_rwsem must be held for write
 */
static void rbd_unlock(struct rbd_device *rbd_dev)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	int ret;

	WARN_ON(!__rbd_is_lock_owner(rbd_dev) ||
		rbd_dev->lock_cookie[0] == '\0');

	ret = ceph_cls_unlock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
			      RBD_LOCK_NAME, rbd_dev->lock_cookie);
	if (ret && ret != -ENOENT)
		rbd_warn(rbd_dev, "failed to unlock: %d", ret);

	/* treat errors as the image is unlocked */
	rbd_dev->lock_state = RBD_LOCK_STATE_UNLOCKED;
	rbd_dev->lock_cookie[0] = '\0';
	rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
	queue_work(rbd_dev->task_wq, &rbd_dev->released_lock_work);
}

static int __rbd_notify_op_lock(struct rbd_device *rbd_dev,
				enum rbd_notify_op notify_op,
				struct page ***preply_pages,
				size_t *preply_len)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct rbd_client_id cid = rbd_get_cid(rbd_dev);
	char buf[4 + 8 + 8 + CEPH_ENCODING_START_BLK_LEN];
	int buf_size = sizeof(buf);
	void *p = buf;

	dout("%s rbd_dev %p notify_op %d\n", __func__, rbd_dev, notify_op);

	/* encode *LockPayload NotifyMessage (op + ClientId) */
	ceph_start_encoding(&p, 2, 1, buf_size - CEPH_ENCODING_START_BLK_LEN);
	ceph_encode_32(&p, notify_op);
	ceph_encode_64(&p, cid.gid);
	ceph_encode_64(&p, cid.handle);

	return ceph_osdc_notify(osdc, &rbd_dev->header_oid,
				&rbd_dev->header_oloc, buf, buf_size,
				RBD_NOTIFY_TIMEOUT, preply_pages, preply_len);
}
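
/*
 * Resulting wire layout of the buffer built above (the preamble is
 * what ceph_start_encoding() emits and accounts for
 * CEPH_ENCODING_START_BLK_LEN):
 *
 *   u8   struct_v (2), u8 struct_compat (1), le32 struct_len
 *   le32 notify_op
 *   le64 cid.gid
 *   le64 cid.handle
 */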

static void rbd_notify_op_lock(struct rbd_device *rbd_dev,
			       enum rbd_notify_op notify_op)
{
	struct page **reply_pages;
	size_t reply_len;

	__rbd_notify_op_lock(rbd_dev, notify_op, &reply_pages, &reply_len);
	ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len));
}

static void rbd_notify_acquired_lock(struct work_struct *work)
{
	struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
						  acquired_lock_work);

	rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_ACQUIRED_LOCK);
}

static void rbd_notify_released_lock(struct work_struct *work)
{
	struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
						  released_lock_work);

	rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_RELEASED_LOCK);
}

static int rbd_request_lock(struct rbd_device *rbd_dev)
{
	struct page **reply_pages;
	size_t reply_len;
	bool lock_owner_responded = false;
	int ret;

	dout("%s rbd_dev %p\n", __func__, rbd_dev);

	ret = __rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_REQUEST_LOCK,
				   &reply_pages, &reply_len);
	if (ret && ret != -ETIMEDOUT) {
		rbd_warn(rbd_dev, "failed to request lock: %d", ret);
		goto out;
	}

	if (reply_len > 0 && reply_len <= PAGE_SIZE) {
		void *p = page_address(reply_pages[0]);
		void *const end = p + reply_len;
		u32 n;

		ceph_decode_32_safe(&p, end, n, e_inval); /* num_acks */
		while (n--) {
			u8 struct_v;
			u32 len;

			ceph_decode_need(&p, end, 8 + 8, e_inval);
			p += 8 + 8; /* skip gid and cookie */

			ceph_decode_32_safe(&p, end, len, e_inval);
			if (!len)
				continue;

			if (lock_owner_responded) {
				rbd_warn(rbd_dev,
					 "duplicate lock owners detected");
				ret = -EIO;
				goto out;
			}

			lock_owner_responded = true;
			ret = ceph_start_decoding(&p, end, 1, "ResponseMessage",
						  &struct_v, &len);
			if (ret) {
				rbd_warn(rbd_dev,
					 "failed to decode ResponseMessage: %d",
					 ret);
				goto e_inval;
			}

			ret = ceph_decode_32(&p);
		}
	}

	if (!lock_owner_responded) {
		rbd_warn(rbd_dev, "no lock owners detected");
		ret = -ETIMEDOUT;
	}

out:
	ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len));
	return ret;

e_inval:
	ret = -EINVAL;
	goto out;
}

static void wake_requests(struct rbd_device *rbd_dev, bool wake_all)
{
	dout("%s rbd_dev %p wake_all %d\n", __func__, rbd_dev, wake_all);

	cancel_delayed_work(&rbd_dev->lock_dwork);
	if (wake_all)
		wake_up_all(&rbd_dev->lock_waitq);
	else
		wake_up(&rbd_dev->lock_waitq);
}

static int get_lock_owner_info(struct rbd_device *rbd_dev,
			       struct ceph_locker **lockers, u32 *num_lockers)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	u8 lock_type;
	char *lock_tag;
	int ret;

	dout("%s rbd_dev %p\n", __func__, rbd_dev);

	ret = ceph_cls_lock_info(osdc, &rbd_dev->header_oid,
				 &rbd_dev->header_oloc, RBD_LOCK_NAME,
				 &lock_type, &lock_tag, lockers, num_lockers);
	if (ret)
		return ret;

	if (*num_lockers == 0) {
		dout("%s rbd_dev %p no lockers detected\n", __func__, rbd_dev);
		goto out;
	}

	if (strcmp(lock_tag, RBD_LOCK_TAG)) {
		rbd_warn(rbd_dev, "locked by external mechanism, tag %s",
			 lock_tag);
		ret = -EBUSY;
		goto out;
	}

	if (lock_type == CEPH_CLS_LOCK_SHARED) {
		rbd_warn(rbd_dev, "shared lock type detected");
		ret = -EBUSY;
		goto out;
	}

	if (strncmp((*lockers)[0].id.cookie, RBD_LOCK_COOKIE_PREFIX,
		    strlen(RBD_LOCK_COOKIE_PREFIX))) {
		rbd_warn(rbd_dev, "locked by external mechanism, cookie %s",
			 (*lockers)[0].id.cookie);
		ret = -EBUSY;
		goto out;
	}

out:
	kfree(lock_tag);
	return ret;
}

static int find_watcher(struct rbd_device *rbd_dev,
			const struct ceph_locker *locker)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct ceph_watch_item *watchers;
	u32 num_watchers;
	u64 cookie;
	int i;
	int ret;

	ret = ceph_osdc_list_watchers(osdc, &rbd_dev->header_oid,
				      &rbd_dev->header_oloc, &watchers,
				      &num_watchers);
	if (ret)
		return ret;

	sscanf(locker->id.cookie, RBD_LOCK_COOKIE_PREFIX " %llu", &cookie);
	for (i = 0; i < num_watchers; i++) {
		if (!memcmp(&watchers[i].addr, &locker->info.addr,
			    sizeof(locker->info.addr)) &&
		    watchers[i].cookie == cookie) {
			struct rbd_client_id cid = {
				.gid = le64_to_cpu(watchers[i].name.num),
				.handle = cookie,
			};

			dout("%s rbd_dev %p found cid %llu-%llu\n", __func__,
			     rbd_dev, cid.gid, cid.handle);
			rbd_set_owner_cid(rbd_dev, &cid);
			ret = 1;
			goto out;
		}
	}

	dout("%s rbd_dev %p no watchers\n", __func__, rbd_dev);
	ret = 0;
out:
	kfree(watchers);
	return ret;
}

/*
 * lock_rwsem must be held for write
 */
static int rbd_try_lock(struct rbd_device *rbd_dev)
{
	struct ceph_client *client = rbd_dev->rbd_client->client;
	struct ceph_locker *lockers;
	u32 num_lockers;
	int ret;

	for (;;) {
		ret = rbd_lock(rbd_dev);
		if (ret != -EBUSY)
			return ret;

		/* determine if the current lock holder is still alive */
		ret = get_lock_owner_info(rbd_dev, &lockers, &num_lockers);
		if (ret)
			return ret;

		if (num_lockers == 0)
			goto again;

		ret = find_watcher(rbd_dev, lockers);
		if (ret) {
			if (ret > 0)
				ret = 0; /* have to request lock */
			goto out;
		}

		rbd_warn(rbd_dev, "%s%llu seems dead, breaking lock",
			 ENTITY_NAME(lockers[0].id.name));

		ret = ceph_monc_blacklist_add(&client->monc,
					      &lockers[0].info.addr);
		if (ret) {
			rbd_warn(rbd_dev, "blacklist of %s%llu failed: %d",
				 ENTITY_NAME(lockers[0].id.name), ret);
			goto out;
		}

		ret = ceph_cls_break_lock(&client->osdc, &rbd_dev->header_oid,
					  &rbd_dev->header_oloc, RBD_LOCK_NAME,
					  lockers[0].id.cookie,
					  &lockers[0].id.name);
		if (ret && ret != -ENOENT)
			goto out;

again:
		ceph_free_lockers(lockers, num_lockers);
	}

out:
	ceph_free_lockers(lockers, num_lockers);
	return ret;
}

/*
 * ret is set only if lock_state is RBD_LOCK_STATE_UNLOCKED
 */
static enum rbd_lock_state rbd_try_acquire_lock(struct rbd_device *rbd_dev,
						int *pret)
{
	enum rbd_lock_state lock_state;

	down_read(&rbd_dev->lock_rwsem);
	dout("%s rbd_dev %p read lock_state %d\n", __func__, rbd_dev,
	     rbd_dev->lock_state);
	if (__rbd_is_lock_owner(rbd_dev)) {
		lock_state = rbd_dev->lock_state;
		up_read(&rbd_dev->lock_rwsem);
		return lock_state;
	}

	up_read(&rbd_dev->lock_rwsem);
	down_write(&rbd_dev->lock_rwsem);
	dout("%s rbd_dev %p write lock_state %d\n", __func__, rbd_dev,
	     rbd_dev->lock_state);
	if (!__rbd_is_lock_owner(rbd_dev)) {
		*pret = rbd_try_lock(rbd_dev);
		if (*pret)
			rbd_warn(rbd_dev, "failed to acquire lock: %d", *pret);
	}

	lock_state = rbd_dev->lock_state;
	up_write(&rbd_dev->lock_rwsem);
	return lock_state;
}

static void rbd_acquire_lock(struct work_struct *work)
{
	struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
					    struct rbd_device, lock_dwork);
	enum rbd_lock_state lock_state;
	int ret = 0;

	dout("%s rbd_dev %p\n", __func__, rbd_dev);
again:
	lock_state = rbd_try_acquire_lock(rbd_dev, &ret);
	if (lock_state != RBD_LOCK_STATE_UNLOCKED || ret == -EBLACKLISTED) {
		if (lock_state == RBD_LOCK_STATE_LOCKED)
			wake_requests(rbd_dev, true);
		dout("%s rbd_dev %p lock_state %d ret %d - done\n", __func__,
		     rbd_dev, lock_state, ret);
		return;
	}

	ret = rbd_request_lock(rbd_dev);
	if (ret == -ETIMEDOUT) {
		goto again; /* treat this as a dead client */
	} else if (ret == -EROFS) {
		rbd_warn(rbd_dev, "peer will not release lock");
		/*
		 * If this is rbd_add_acquire_lock(), we want to fail
		 * immediately -- reuse BLACKLISTED flag.  Otherwise we
		 * want to block.
		 */
		if (!(rbd_dev->disk->flags & GENHD_FL_UP)) {
			set_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags);
			/* wake "rbd map --exclusive" process */
			wake_requests(rbd_dev, false);
		}
	} else if (ret < 0) {
		rbd_warn(rbd_dev, "error requesting lock: %d", ret);
		mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork,
				 RBD_RETRY_DELAY);
	} else {
		/*
		 * lock owner acked, but resend if we don't see them
		 * release the lock
		 */
		dout("%s rbd_dev %p requeueing lock_dwork\n", __func__,
		     rbd_dev);
		mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork,
		    msecs_to_jiffies(2 * RBD_NOTIFY_TIMEOUT * MSEC_PER_SEC));
	}
}

/*
 * lock_rwsem must be held for write
 */
static bool rbd_release_lock(struct rbd_device *rbd_dev)
{
	dout("%s rbd_dev %p read lock_state %d\n", __func__, rbd_dev,
	     rbd_dev->lock_state);
	if (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED)
		return false;

	rbd_dev->lock_state = RBD_LOCK_STATE_RELEASING;
	downgrade_write(&rbd_dev->lock_rwsem);
	/*
	 * Ensure that all in-flight IO is flushed.
	 *
	 * FIXME: ceph_osdc_sync() flushes the entire OSD client, which
	 * may be shared with other devices.
	 */
	ceph_osdc_sync(&rbd_dev->rbd_client->client->osdc);
	up_read(&rbd_dev->lock_rwsem);

	down_write(&rbd_dev->lock_rwsem);
	dout("%s rbd_dev %p write lock_state %d\n", __func__, rbd_dev,
	     rbd_dev->lock_state);
	if (rbd_dev->lock_state != RBD_LOCK_STATE_RELEASING)
		return false;

	rbd_unlock(rbd_dev);
	/*
	 * Give others a chance to grab the lock - we would re-acquire
	 * almost immediately if we got new IO during ceph_osdc_sync()
	 * otherwise.  We need to ack our own notifications, so this
	 * lock_dwork will be requeued from rbd_wait_state_locked()
	 * after wake_requests() in rbd_handle_released_lock().
	 */
	cancel_delayed_work(&rbd_dev->lock_dwork);
	return true;
}
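
/*
 * Lock release sequence above (sketch): LOCKED -> RELEASING under the
 * write lock, then the rwsem is downgraded so in-flight I/O can drain
 * via ceph_osdc_sync() while new lock-state transitions stay blocked,
 * and finally the write lock is retaken for the actual unlock.  If the
 * state moved away from RELEASING in the window, someone else resolved
 * it and we bail out.
 */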

static void rbd_release_lock_work(struct work_struct *work)
{
	struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
						  unlock_work);

	down_write(&rbd_dev->lock_rwsem);
	rbd_release_lock(rbd_dev);
	up_write(&rbd_dev->lock_rwsem);
}

static void rbd_handle_acquired_lock(struct rbd_device *rbd_dev, u8 struct_v,
				     void **p)
{
	struct rbd_client_id cid = { 0 };

	if (struct_v >= 2) {
		cid.gid = ceph_decode_64(p);
		cid.handle = ceph_decode_64(p);
	}

	dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
	     cid.handle);
	if (!rbd_cid_equal(&cid, &rbd_empty_cid)) {
		down_write(&rbd_dev->lock_rwsem);
		if (rbd_cid_equal(&cid, &rbd_dev->owner_cid)) {
			/*
			 * we already know that the remote client is
			 * the owner
			 */
			up_write(&rbd_dev->lock_rwsem);
			return;
		}

		rbd_set_owner_cid(rbd_dev, &cid);
		downgrade_write(&rbd_dev->lock_rwsem);
	} else {
		down_read(&rbd_dev->lock_rwsem);
	}

	if (!__rbd_is_lock_owner(rbd_dev))
		wake_requests(rbd_dev, false);
	up_read(&rbd_dev->lock_rwsem);
}

static void rbd_handle_released_lock(struct rbd_device *rbd_dev, u8 struct_v,
				     void **p)
{
	struct rbd_client_id cid = { 0 };

	if (struct_v >= 2) {
		cid.gid = ceph_decode_64(p);
		cid.handle = ceph_decode_64(p);
	}

	dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
	     cid.handle);
	if (!rbd_cid_equal(&cid, &rbd_empty_cid)) {
		down_write(&rbd_dev->lock_rwsem);
		if (!rbd_cid_equal(&cid, &rbd_dev->owner_cid)) {
			dout("%s rbd_dev %p unexpected owner, cid %llu-%llu != owner_cid %llu-%llu\n",
			     __func__, rbd_dev, cid.gid, cid.handle,
			     rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle);
			up_write(&rbd_dev->lock_rwsem);
			return;
		}

		rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
		downgrade_write(&rbd_dev->lock_rwsem);
	} else {
		down_read(&rbd_dev->lock_rwsem);
	}

	if (!__rbd_is_lock_owner(rbd_dev))
		wake_requests(rbd_dev, false);
	up_read(&rbd_dev->lock_rwsem);
}

/*
 * Returns result for ResponseMessage to be encoded (<= 0), or 1 if no
 * ResponseMessage is needed.
 */
static int rbd_handle_request_lock(struct rbd_device *rbd_dev, u8 struct_v,
				   void **p)
{
	struct rbd_client_id my_cid = rbd_get_cid(rbd_dev);
	struct rbd_client_id cid = { 0 };
	int result = 1;

	if (struct_v >= 2) {
		cid.gid = ceph_decode_64(p);
		cid.handle = ceph_decode_64(p);
	}

	dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
	     cid.handle);
	if (rbd_cid_equal(&cid, &my_cid))
		return result;

	down_read(&rbd_dev->lock_rwsem);
	if (__rbd_is_lock_owner(rbd_dev)) {
		if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED &&
		    rbd_cid_equal(&rbd_dev->owner_cid, &rbd_empty_cid))
			goto out_unlock;

		/*
		 * encode ResponseMessage(0) so the peer can detect
		 * a missing owner
		 */
		result = 0;

		if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED) {
			if (!rbd_dev->opts->exclusive) {
				dout("%s rbd_dev %p queueing unlock_work\n",
				     __func__, rbd_dev);
				queue_work(rbd_dev->task_wq,
					   &rbd_dev->unlock_work);
			} else {
				/* refuse to release the lock */
				result = -EROFS;
			}
		}
	}

out_unlock:
	up_read(&rbd_dev->lock_rwsem);
	return result;
}
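
/*
 * ResponseMessage result values produced above (roughly):
 *   1      - no response payload needed: the request is our own, we
 *            are not the owner, or ownership is still being settled
 *   0      - we own the lock and have queued unlock_work to release it
 *   -EROFS - we own the lock and refuse to release it ("rbd map
 *            --exclusive")
 */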
3235 
__rbd_acknowledge_notify(struct rbd_device * rbd_dev,u64 notify_id,u64 cookie,s32 * result)3236 static void __rbd_acknowledge_notify(struct rbd_device *rbd_dev,
3237 				     u64 notify_id, u64 cookie, s32 *result)
3238 {
3239 	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3240 	char buf[4 + CEPH_ENCODING_START_BLK_LEN];
3241 	int buf_size = sizeof(buf);
3242 	int ret;
3243 
3244 	if (result) {
3245 		void *p = buf;
3246 
3247 		/* encode ResponseMessage */
3248 		ceph_start_encoding(&p, 1, 1,
3249 				    buf_size - CEPH_ENCODING_START_BLK_LEN);
3250 		ceph_encode_32(&p, *result);
3251 	} else {
3252 		buf_size = 0;
3253 	}
3254 
3255 	ret = ceph_osdc_notify_ack(osdc, &rbd_dev->header_oid,
3256 				   &rbd_dev->header_oloc, notify_id, cookie,
3257 				   buf, buf_size);
3258 	if (ret)
3259 		rbd_warn(rbd_dev, "acknowledge_notify failed: %d", ret);
3260 }
3261 
rbd_acknowledge_notify(struct rbd_device * rbd_dev,u64 notify_id,u64 cookie)3262 static void rbd_acknowledge_notify(struct rbd_device *rbd_dev, u64 notify_id,
3263 				   u64 cookie)
3264 {
3265 	dout("%s rbd_dev %p\n", __func__, rbd_dev);
3266 	__rbd_acknowledge_notify(rbd_dev, notify_id, cookie, NULL);
3267 }
3268 
rbd_acknowledge_notify_result(struct rbd_device * rbd_dev,u64 notify_id,u64 cookie,s32 result)3269 static void rbd_acknowledge_notify_result(struct rbd_device *rbd_dev,
3270 					  u64 notify_id, u64 cookie, s32 result)
3271 {
3272 	dout("%s rbd_dev %p result %d\n", __func__, rbd_dev, result);
3273 	__rbd_acknowledge_notify(rbd_dev, notify_id, cookie, &result);
3274 }
3275 
rbd_watch_cb(void * arg,u64 notify_id,u64 cookie,u64 notifier_id,void * data,size_t data_len)3276 static void rbd_watch_cb(void *arg, u64 notify_id, u64 cookie,
3277 			 u64 notifier_id, void *data, size_t data_len)
3278 {
3279 	struct rbd_device *rbd_dev = arg;
3280 	void *p = data;
3281 	void *const end = p + data_len;
3282 	u8 struct_v = 0;
3283 	u32 len;
3284 	u32 notify_op;
3285 	int ret;
3286 
3287 	dout("%s rbd_dev %p cookie %llu notify_id %llu data_len %zu\n",
3288 	     __func__, rbd_dev, cookie, notify_id, data_len);
3289 	if (data_len) {
3290 		ret = ceph_start_decoding(&p, end, 1, "NotifyMessage",
3291 					  &struct_v, &len);
3292 		if (ret) {
3293 			rbd_warn(rbd_dev, "failed to decode NotifyMessage: %d",
3294 				 ret);
3295 			return;
3296 		}
3297 
3298 		notify_op = ceph_decode_32(&p);
3299 	} else {
3300 		/* legacy notification for header updates */
3301 		notify_op = RBD_NOTIFY_OP_HEADER_UPDATE;
3302 		len = 0;
3303 	}
3304 
3305 	dout("%s rbd_dev %p notify_op %u\n", __func__, rbd_dev, notify_op);
3306 	switch (notify_op) {
3307 	case RBD_NOTIFY_OP_ACQUIRED_LOCK:
3308 		rbd_handle_acquired_lock(rbd_dev, struct_v, &p);
3309 		rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
3310 		break;
3311 	case RBD_NOTIFY_OP_RELEASED_LOCK:
3312 		rbd_handle_released_lock(rbd_dev, struct_v, &p);
3313 		rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
3314 		break;
3315 	case RBD_NOTIFY_OP_REQUEST_LOCK:
3316 		ret = rbd_handle_request_lock(rbd_dev, struct_v, &p);
3317 		if (ret <= 0)
3318 			rbd_acknowledge_notify_result(rbd_dev, notify_id,
3319 						      cookie, ret);
3320 		else
3321 			rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
3322 		break;
3323 	case RBD_NOTIFY_OP_HEADER_UPDATE:
3324 		ret = rbd_dev_refresh(rbd_dev);
3325 		if (ret)
3326 			rbd_warn(rbd_dev, "refresh failed: %d", ret);
3327 
3328 		rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
3329 		break;
3330 	default:
3331 		if (rbd_is_lock_owner(rbd_dev))
3332 			rbd_acknowledge_notify_result(rbd_dev, notify_id,
3333 						      cookie, -EOPNOTSUPP);
3334 		else
3335 			rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
3336 		break;
3337 	}
3338 }
3339 
3340 static void __rbd_unregister_watch(struct rbd_device *rbd_dev);
3341 
static void rbd_watch_errcb(void *arg, u64 cookie, int err)
{
	struct rbd_device *rbd_dev = arg;

	rbd_warn(rbd_dev, "encountered watch error: %d", err);

	down_write(&rbd_dev->lock_rwsem);
	rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
	up_write(&rbd_dev->lock_rwsem);

	mutex_lock(&rbd_dev->watch_mutex);
	if (rbd_dev->watch_state == RBD_WATCH_STATE_REGISTERED) {
		__rbd_unregister_watch(rbd_dev);
		rbd_dev->watch_state = RBD_WATCH_STATE_ERROR;

		queue_delayed_work(rbd_dev->task_wq, &rbd_dev->watch_dwork, 0);
	}
	mutex_unlock(&rbd_dev->watch_mutex);
}

/*
 * watch_mutex must be locked
 */
static int __rbd_register_watch(struct rbd_device *rbd_dev)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct ceph_osd_linger_request *handle;

	rbd_assert(!rbd_dev->watch_handle);
	dout("%s rbd_dev %p\n", __func__, rbd_dev);

	handle = ceph_osdc_watch(osdc, &rbd_dev->header_oid,
				 &rbd_dev->header_oloc, rbd_watch_cb,
				 rbd_watch_errcb, rbd_dev);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	rbd_dev->watch_handle = handle;
	return 0;
}

/*
 * watch_mutex must be locked
 */
static void __rbd_unregister_watch(struct rbd_device *rbd_dev)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	int ret;

	rbd_assert(rbd_dev->watch_handle);
	dout("%s rbd_dev %p\n", __func__, rbd_dev);

	ret = ceph_osdc_unwatch(osdc, rbd_dev->watch_handle);
	if (ret)
		rbd_warn(rbd_dev, "failed to unwatch: %d", ret);

	rbd_dev->watch_handle = NULL;
}

static int rbd_register_watch(struct rbd_device *rbd_dev)
{
	int ret;

	mutex_lock(&rbd_dev->watch_mutex);
	rbd_assert(rbd_dev->watch_state == RBD_WATCH_STATE_UNREGISTERED);
	ret = __rbd_register_watch(rbd_dev);
	if (ret)
		goto out;

	rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED;
	rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id;

out:
	mutex_unlock(&rbd_dev->watch_mutex);
	return ret;
}

static void cancel_tasks_sync(struct rbd_device *rbd_dev)
{
	dout("%s rbd_dev %p\n", __func__, rbd_dev);

	cancel_work_sync(&rbd_dev->acquired_lock_work);
	cancel_work_sync(&rbd_dev->released_lock_work);
	cancel_delayed_work_sync(&rbd_dev->lock_dwork);
	cancel_work_sync(&rbd_dev->unlock_work);
}

static void rbd_unregister_watch(struct rbd_device *rbd_dev)
{
	WARN_ON(waitqueue_active(&rbd_dev->lock_waitq));
	cancel_tasks_sync(rbd_dev);

	mutex_lock(&rbd_dev->watch_mutex);
	if (rbd_dev->watch_state == RBD_WATCH_STATE_REGISTERED)
		__rbd_unregister_watch(rbd_dev);
	rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED;
	mutex_unlock(&rbd_dev->watch_mutex);

	cancel_delayed_work_sync(&rbd_dev->watch_dwork);
	ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
}

/*
 * lock_rwsem must be held for write
 */
static void rbd_reacquire_lock(struct rbd_device *rbd_dev)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	char cookie[32];
	int ret;

	WARN_ON(rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED);

	format_lock_cookie(rbd_dev, cookie);
	ret = ceph_cls_set_cookie(osdc, &rbd_dev->header_oid,
				  &rbd_dev->header_oloc, RBD_LOCK_NAME,
				  CEPH_CLS_LOCK_EXCLUSIVE, rbd_dev->lock_cookie,
				  RBD_LOCK_TAG, cookie);
	if (ret) {
		if (ret != -EOPNOTSUPP)
			rbd_warn(rbd_dev, "failed to update lock cookie: %d",
				 ret);

		/*
		 * Lock cookie cannot be updated on older OSDs, so do
		 * a manual release and queue an acquire.
		 */
		if (rbd_release_lock(rbd_dev))
			queue_delayed_work(rbd_dev->task_wq,
					   &rbd_dev->lock_dwork, 0);
	} else {
		__rbd_lock(rbd_dev, cookie);
	}
}

static void rbd_reregister_watch(struct work_struct *work)
{
	struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
					    struct rbd_device, watch_dwork);
	int ret;

	dout("%s rbd_dev %p\n", __func__, rbd_dev);

	mutex_lock(&rbd_dev->watch_mutex);
	if (rbd_dev->watch_state != RBD_WATCH_STATE_ERROR) {
		mutex_unlock(&rbd_dev->watch_mutex);
		return;
	}

	ret = __rbd_register_watch(rbd_dev);
	if (ret) {
		rbd_warn(rbd_dev, "failed to reregister watch: %d", ret);
		if (ret == -EBLACKLISTED || ret == -ENOENT) {
			set_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags);
			wake_requests(rbd_dev, true);
		} else {
			queue_delayed_work(rbd_dev->task_wq,
					   &rbd_dev->watch_dwork,
					   RBD_RETRY_DELAY);
		}
		mutex_unlock(&rbd_dev->watch_mutex);
		return;
	}

	rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED;
	rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id;
	mutex_unlock(&rbd_dev->watch_mutex);

	down_write(&rbd_dev->lock_rwsem);
	if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED)
		rbd_reacquire_lock(rbd_dev);
	up_write(&rbd_dev->lock_rwsem);

	ret = rbd_dev_refresh(rbd_dev);
	if (ret)
		rbd_warn(rbd_dev, "reregistration refresh failed: %d", ret);
}

/*
 * Synchronous osd object method call.  Returns the number of bytes
 * returned in the reply (inbound) buffer, or a negative error code.
 */
static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
			     struct ceph_object_id *oid,
			     struct ceph_object_locator *oloc,
			     const char *method_name,
			     const void *outbound,
			     size_t outbound_size,
			     void *inbound,
			     size_t inbound_size)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct page *req_page = NULL;
	struct page *reply_page;
	int ret;

	/*
	 * Method calls are ultimately read operations.  The result
	 * should be placed into the inbound buffer provided.  They
	 * may also supply outbound data--parameters for the object
	 * method.  Currently, if present, this will be a snapshot id.
	 */
	if (outbound) {
		if (outbound_size > PAGE_SIZE)
			return -E2BIG;

		req_page = alloc_page(GFP_KERNEL);
		if (!req_page)
			return -ENOMEM;

		memcpy(page_address(req_page), outbound, outbound_size);
	}

	reply_page = alloc_page(GFP_KERNEL);
	if (!reply_page) {
		if (req_page)
			__free_page(req_page);
		return -ENOMEM;
	}

	ret = ceph_osdc_call(osdc, oid, oloc, RBD_DRV_NAME, method_name,
			     CEPH_OSD_FLAG_READ, req_page, outbound_size,
			     reply_page, &inbound_size);
	if (!ret) {
		memcpy(inbound, page_address(reply_page), inbound_size);
		ret = inbound_size;
	}

	if (req_page)
		__free_page(req_page);
	__free_page(reply_page);
	return ret;
}
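
/*
 * A typical use (sketch) is reading the image size with the
 * "get_size" class method, as _rbd_dev_v2_snap_size() does later in
 * this file:
 *
 *	__le64 snapid = cpu_to_le64(CEPH_NOSNAP);
 *	struct {
 *		u8 order;
 *		__le64 size;
 *	} __attribute__ ((packed)) size_buf = { 0 };
 *
 *	ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
 *				  &rbd_dev->header_oloc, "get_size",
 *				  &snapid, sizeof(snapid),
 *				  &size_buf, sizeof(size_buf));
 */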

/*
 * lock_rwsem must be held for read
 */
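/*
 * Wait until the exclusive lock is acquired on behalf of this
 * client.  Each pass through the loop kicks lock_dwork (which does
 * the actual lock acquisition), drops lock_rwsem and sleeps on
 * lock_waitq; it gives up if the client gets blacklisted or the
 * configured lock_timeout expires.
 */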
static int rbd_wait_state_locked(struct rbd_device *rbd_dev, bool may_acquire)
{
	DEFINE_WAIT(wait);
	unsigned long timeout;
	int ret = 0;

	if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags))
		return -EBLACKLISTED;

	if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED)
		return 0;

	if (!may_acquire) {
		rbd_warn(rbd_dev, "exclusive lock required");
		return -EROFS;
	}

	do {
		/*
		 * Note the use of mod_delayed_work() in rbd_acquire_lock()
		 * and cancel_delayed_work() in wake_requests().
		 */
		dout("%s rbd_dev %p queueing lock_dwork\n", __func__, rbd_dev);
		queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
		prepare_to_wait_exclusive(&rbd_dev->lock_waitq, &wait,
					  TASK_UNINTERRUPTIBLE);
		up_read(&rbd_dev->lock_rwsem);
		timeout = schedule_timeout(ceph_timeout_jiffies(
						rbd_dev->opts->lock_timeout));
		down_read(&rbd_dev->lock_rwsem);
		if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) {
			ret = -EBLACKLISTED;
			break;
		}
		if (!timeout) {
			rbd_warn(rbd_dev, "timed out waiting for lock");
			ret = -ETIMEDOUT;
			break;
		}
	} while (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED);

	finish_wait(&rbd_dev->lock_waitq, &wait);
	return ret;
}

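/*
 * Process a single block layer request.  rbd_queue_rq() queues the
 * work_struct embedded in the request PDU onto rbd_wq, and this
 * function then translates the request into an image request,
 * waiting for the exclusive lock first if the mapping requires it.
 */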
static void rbd_queue_workfn(struct work_struct *work)
{
	struct request *rq = blk_mq_rq_from_pdu(work);
	struct rbd_device *rbd_dev = rq->q->queuedata;
	struct rbd_img_request *img_request;
	struct ceph_snap_context *snapc = NULL;
	u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT;
	u64 length = blk_rq_bytes(rq);
	enum obj_operation_type op_type;
	u64 mapping_size;
	bool must_be_locked;
	int result;

	switch (req_op(rq)) {
	case REQ_OP_DISCARD:
	case REQ_OP_WRITE_ZEROES:
		op_type = OBJ_OP_DISCARD;
		break;
	case REQ_OP_WRITE:
		op_type = OBJ_OP_WRITE;
		break;
	case REQ_OP_READ:
		op_type = OBJ_OP_READ;
		break;
	default:
		dout("%s: non-fs request type %d\n", __func__, req_op(rq));
		result = -EIO;
		goto err;
	}

	/* Ignore/skip any zero-length requests */

	if (!length) {
		dout("%s: zero-length request\n", __func__);
		result = 0;
		goto err_rq;
	}

	rbd_assert(op_type == OBJ_OP_READ ||
		   rbd_dev->spec->snap_id == CEPH_NOSNAP);

	/*
	 * Quit early if the mapped snapshot no longer exists.  It's
	 * still possible the snapshot will have disappeared by the
	 * time our request arrives at the osd, but there's no sense in
	 * sending it if we already know.
	 */
	if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
		dout("request for non-existent snapshot");
		rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
		result = -ENXIO;
		goto err_rq;
	}

	if (offset && length > U64_MAX - offset + 1) {
		rbd_warn(rbd_dev, "bad request range (%llu~%llu)", offset,
			 length);
		result = -EINVAL;
		goto err_rq;	/* Shouldn't happen */
	}

	blk_mq_start_request(rq);

	down_read(&rbd_dev->header_rwsem);
	mapping_size = rbd_dev->mapping.size;
	if (op_type != OBJ_OP_READ) {
		snapc = rbd_dev->header.snapc;
		ceph_get_snap_context(snapc);
	}
	up_read(&rbd_dev->header_rwsem);

	if (offset + length > mapping_size) {
		rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)", offset,
			 length, mapping_size);
		result = -EIO;
		goto err_rq;
	}

	must_be_locked =
	    (rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK) &&
	    (op_type != OBJ_OP_READ || rbd_dev->opts->lock_on_read);
	if (must_be_locked) {
		down_read(&rbd_dev->lock_rwsem);
		result = rbd_wait_state_locked(rbd_dev,
					       !rbd_dev->opts->exclusive);
		if (result)
			goto err_unlock;
	}

	img_request = rbd_img_request_create(rbd_dev, op_type, snapc);
	if (!img_request) {
		result = -ENOMEM;
		goto err_unlock;
	}
	img_request->rq = rq;
	snapc = NULL; /* img_request consumes a ref */

	if (op_type == OBJ_OP_DISCARD)
		result = rbd_img_fill_nodata(img_request, offset, length);
	else
		result = rbd_img_fill_from_bio(img_request, offset, length,
					       rq->bio);
	if (result)
		goto err_img_request;

	rbd_img_request_submit(img_request);
	if (must_be_locked)
		up_read(&rbd_dev->lock_rwsem);
	return;

err_img_request:
	rbd_img_request_put(img_request);
err_unlock:
	if (must_be_locked)
		up_read(&rbd_dev->lock_rwsem);
err_rq:
	if (result)
		rbd_warn(rbd_dev, "%s %llx at %llx result %d",
			 obj_op_name(op_type), length, offset, result);
	ceph_put_snap_context(snapc);
err:
	blk_mq_end_request(rq, errno_to_blk_status(result));
}

static blk_status_t rbd_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;
	struct work_struct *work = blk_mq_rq_to_pdu(rq);

	queue_work(rbd_wq, work);
	return BLK_STS_OK;
}

static void rbd_free_disk(struct rbd_device *rbd_dev)
{
	blk_cleanup_queue(rbd_dev->disk->queue);
	blk_mq_free_tag_set(&rbd_dev->tag_set);
	put_disk(rbd_dev->disk);
	rbd_dev->disk = NULL;
}

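/*
 * Synchronously read up to buf_len bytes from the start of the
 * given object into buf.  Used to read the format 1 image header.
 * Returns the number of bytes read, or a negative error code.
 */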
static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
			     struct ceph_object_id *oid,
			     struct ceph_object_locator *oloc,
			     void *buf, int buf_len)

{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct ceph_osd_request *req;
	struct page **pages;
	int num_pages = calc_pages_for(0, buf_len);
	int ret;

	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	ceph_oid_copy(&req->r_base_oid, oid);
	ceph_oloc_copy(&req->r_base_oloc, oloc);
	req->r_flags = CEPH_OSD_FLAG_READ;

	ret = ceph_osdc_alloc_messages(req, GFP_KERNEL);
	if (ret)
		goto out_req;

	pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out_req;
	}

	osd_req_op_extent_init(req, 0, CEPH_OSD_OP_READ, 0, buf_len, 0, 0);
	osd_req_op_extent_osd_data_pages(req, 0, pages, buf_len, 0, false,
					 true);

	ceph_osdc_start_request(osdc, req, false);
	ret = ceph_osdc_wait_request(osdc, req);
	if (ret >= 0)
		ceph_copy_from_page_vector(pages, buf, 0, ret);

out_req:
	ceph_osdc_put_request(req);
	return ret;
}

/*
 * Read the complete header for the given rbd device.  On successful
 * return, the rbd_dev->header field will contain up-to-date
 * information about the image.
 */
static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
{
	struct rbd_image_header_ondisk *ondisk = NULL;
	u32 snap_count = 0;
	u64 names_size = 0;
	u32 want_count;
	int ret;

	/*
	 * The complete header will include an array of its 64-bit
	 * snapshot ids, followed by the names of those snapshots as
	 * a contiguous block of NUL-terminated strings.  Note that
	 * the number of snapshots could change by the time we read
	 * it in, in which case we re-read it.
	 */
	do {
		size_t size;

		kfree(ondisk);

		size = sizeof (*ondisk);
		size += snap_count * sizeof (struct rbd_image_snap_ondisk);
		size += names_size;
		ondisk = kmalloc(size, GFP_KERNEL);
		if (!ondisk)
			return -ENOMEM;

		ret = rbd_obj_read_sync(rbd_dev, &rbd_dev->header_oid,
					&rbd_dev->header_oloc, ondisk, size);
		if (ret < 0)
			goto out;
		if ((size_t)ret < size) {
			ret = -ENXIO;
			rbd_warn(rbd_dev, "short header read (want %zd got %d)",
				size, ret);
			goto out;
		}
		if (!rbd_dev_ondisk_valid(ondisk)) {
			ret = -ENXIO;
			rbd_warn(rbd_dev, "invalid header");
			goto out;
		}

		names_size = le64_to_cpu(ondisk->snap_names_len);
		want_count = snap_count;
		snap_count = le32_to_cpu(ondisk->snap_count);
	} while (snap_count != want_count);

	ret = rbd_header_from_disk(rbd_dev, ondisk);
out:
	kfree(ondisk);

	return ret;
}

/*
 * Clear the rbd device's EXISTS flag if the snapshot it's mapped to
 * has disappeared from the (just updated) snapshot context.
 */
static void rbd_exists_validate(struct rbd_device *rbd_dev)
{
	u64 snap_id;

	if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags))
		return;

	snap_id = rbd_dev->spec->snap_id;
	if (snap_id == CEPH_NOSNAP)
		return;

	if (rbd_dev_snap_index(rbd_dev, snap_id) == BAD_SNAP_INDEX)
		clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
}

static void rbd_dev_update_size(struct rbd_device *rbd_dev)
{
	sector_t size;

	/*
	 * If EXISTS is not set, rbd_dev->disk may be NULL, so don't
	 * try to update its size.  If REMOVING is set, updating size
	 * is just useless work since the device can't be opened.
	 */
	if (test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags) &&
	    !test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags)) {
		size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
		dout("setting size to %llu sectors", (unsigned long long)size);
		set_capacity(rbd_dev->disk, size);
		revalidate_disk(rbd_dev->disk);
	}
}

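/*
 * Re-read the image header and, for a layered image, the parent
 * info (the parent can disappear if the image gets flattened).
 * The mapping size and block device capacity are updated if the
 * image was resized.  Called on header update notifies, after
 * watch re-registration and from the sysfs "refresh" attribute.
 */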
static int rbd_dev_refresh(struct rbd_device *rbd_dev)
{
	u64 mapping_size;
	int ret;

	down_write(&rbd_dev->header_rwsem);
	mapping_size = rbd_dev->mapping.size;

	ret = rbd_dev_header_info(rbd_dev);
	if (ret)
		goto out;

	/*
	 * If there is a parent, see if it has disappeared due to the
	 * mapped image getting flattened.
	 */
	if (rbd_dev->parent) {
		ret = rbd_dev_v2_parent_info(rbd_dev);
		if (ret)
			goto out;
	}

	if (rbd_dev->spec->snap_id == CEPH_NOSNAP) {
		rbd_dev->mapping.size = rbd_dev->header.image_size;
	} else {
		/* validate mapped snapshot's EXISTS flag */
		rbd_exists_validate(rbd_dev);
	}

out:
	up_write(&rbd_dev->header_rwsem);
	if (!ret && mapping_size != rbd_dev->mapping.size)
		rbd_dev_update_size(rbd_dev);

	return ret;
}

static int rbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
		unsigned int hctx_idx, unsigned int numa_node)
{
	struct work_struct *work = blk_mq_rq_to_pdu(rq);

	INIT_WORK(work, rbd_queue_workfn);
	return 0;
}

static const struct blk_mq_ops rbd_mq_ops = {
	.queue_rq	= rbd_queue_rq,
	.init_request	= rbd_init_request,
};

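/*
 * Set up the gendisk and blk-mq queue for a mapping.  A single
 * hardware queue is used, with a work_struct as the per-request
 * PDU (see rbd_init_request() above).  The I/O limits are derived
 * from the size of a complete object set (object size times stripe
 * count).
 */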
static int rbd_init_disk(struct rbd_device *rbd_dev)
{
	struct gendisk *disk;
	struct request_queue *q;
	unsigned int objset_bytes =
	    rbd_dev->layout.object_size * rbd_dev->layout.stripe_count;
	int err;

	/* create gendisk info */
	disk = alloc_disk(single_major ?
			  (1 << RBD_SINGLE_MAJOR_PART_SHIFT) :
			  RBD_MINORS_PER_MAJOR);
	if (!disk)
		return -ENOMEM;

	snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
		 rbd_dev->dev_id);
	disk->major = rbd_dev->major;
	disk->first_minor = rbd_dev->minor;
	if (single_major)
		disk->flags |= GENHD_FL_EXT_DEVT;
	disk->fops = &rbd_bd_ops;
	disk->private_data = rbd_dev;

	memset(&rbd_dev->tag_set, 0, sizeof(rbd_dev->tag_set));
	rbd_dev->tag_set.ops = &rbd_mq_ops;
	rbd_dev->tag_set.queue_depth = rbd_dev->opts->queue_depth;
	rbd_dev->tag_set.numa_node = NUMA_NO_NODE;
	rbd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
	rbd_dev->tag_set.nr_hw_queues = 1;
	rbd_dev->tag_set.cmd_size = sizeof(struct work_struct);

	err = blk_mq_alloc_tag_set(&rbd_dev->tag_set);
	if (err)
		goto out_disk;

	q = blk_mq_init_queue(&rbd_dev->tag_set);
	if (IS_ERR(q)) {
		err = PTR_ERR(q);
		goto out_tag_set;
	}

	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
	/* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */

	blk_queue_max_hw_sectors(q, objset_bytes >> SECTOR_SHIFT);
	q->limits.max_sectors = queue_max_hw_sectors(q);
	blk_queue_max_segments(q, USHRT_MAX);
	blk_queue_max_segment_size(q, UINT_MAX);
	blk_queue_io_min(q, objset_bytes);
	blk_queue_io_opt(q, objset_bytes);

	if (rbd_dev->opts->trim) {
		blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
		q->limits.discard_granularity = objset_bytes;
		blk_queue_max_discard_sectors(q, objset_bytes >> SECTOR_SHIFT);
		blk_queue_max_write_zeroes_sectors(q, objset_bytes >> SECTOR_SHIFT);
	}

	if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
		q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;

	/*
	 * disk_release() expects a queue ref from add_disk() and will
	 * put it.  Hold an extra ref until add_disk() is called.
	 */
	WARN_ON(!blk_get_queue(q));
	disk->queue = q;
	q->queuedata = rbd_dev;

	rbd_dev->disk = disk;

	return 0;
out_tag_set:
	blk_mq_free_tag_set(&rbd_dev->tag_set);
out_disk:
	put_disk(disk);
	return err;
}

/*
  sysfs
*/
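
/*
 * Each mapped image is exposed under /sys/bus/rbd/devices/<dev-id>/
 * through the attributes defined below, e.g. (assuming an image
 * mapped with device id 0):
 *
 *	$ cat /sys/bus/rbd/devices/0/size
 *	$ cat /sys/bus/rbd/devices/0/current_snap
 *	$ echo 1 > /sys/bus/rbd/devices/0/refresh
 *
 * See Documentation/ABI/testing/sysfs-bus-rbd for the full list.
 */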

static struct rbd_device *dev_to_rbd_dev(struct device *dev)
{
	return container_of(dev, struct rbd_device, dev);
}

static ssize_t rbd_size_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%llu\n",
		(unsigned long long)rbd_dev->mapping.size);
}

/*
 * Note this shows the features for whatever's mapped, which is not
 * necessarily the base image.
 */
static ssize_t rbd_features_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "0x%016llx\n",
			(unsigned long long)rbd_dev->mapping.features);
}

static ssize_t rbd_major_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	if (rbd_dev->major)
		return sprintf(buf, "%d\n", rbd_dev->major);

	return sprintf(buf, "(none)\n");
}

static ssize_t rbd_minor_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%d\n", rbd_dev->minor);
}

static ssize_t rbd_client_addr_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
	struct ceph_entity_addr *client_addr =
	    ceph_client_addr(rbd_dev->rbd_client->client);

	return sprintf(buf, "%pISpc/%u\n", &client_addr->in_addr,
		       le32_to_cpu(client_addr->nonce));
}

static ssize_t rbd_client_id_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "client%lld\n",
		       ceph_client_gid(rbd_dev->rbd_client->client));
}

static ssize_t rbd_cluster_fsid_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%pU\n", &rbd_dev->rbd_client->client->fsid);
}

static ssize_t rbd_config_info_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%s\n", rbd_dev->config_info);
}

static ssize_t rbd_pool_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
}

static ssize_t rbd_pool_id_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%llu\n",
			(unsigned long long) rbd_dev->spec->pool_id);
}

static ssize_t rbd_pool_ns_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%s\n", rbd_dev->spec->pool_ns ?: "");
}

static ssize_t rbd_name_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	if (rbd_dev->spec->image_name)
		return sprintf(buf, "%s\n", rbd_dev->spec->image_name);

	return sprintf(buf, "(unknown)\n");
}

static ssize_t rbd_image_id_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
}

/*
 * Shows the name of the currently-mapped snapshot (or
 * RBD_SNAP_HEAD_NAME for the base image).
 */
static ssize_t rbd_snap_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
}

static ssize_t rbd_snap_id_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%llu\n", rbd_dev->spec->snap_id);
}

/*
 * For a v2 image, shows the chain of parent images, separated by empty
 * lines.  For v1 images or if there is no parent, shows "(no parent
 * image)".
 */
static ssize_t rbd_parent_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
	ssize_t count = 0;

	if (!rbd_dev->parent)
		return sprintf(buf, "(no parent image)\n");

	for ( ; rbd_dev->parent; rbd_dev = rbd_dev->parent) {
		struct rbd_spec *spec = rbd_dev->parent_spec;

		count += sprintf(&buf[count], "%s"
			    "pool_id %llu\npool_name %s\n"
			    "pool_ns %s\n"
			    "image_id %s\nimage_name %s\n"
			    "snap_id %llu\nsnap_name %s\n"
			    "overlap %llu\n",
			    !count ? "" : "\n", /* first? */
			    spec->pool_id, spec->pool_name,
			    spec->pool_ns ?: "",
			    spec->image_id, spec->image_name ?: "(unknown)",
			    spec->snap_id, spec->snap_name,
			    rbd_dev->parent_overlap);
	}

	return count;
}

static ssize_t rbd_image_refresh(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t size)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
	int ret;

	ret = rbd_dev_refresh(rbd_dev);
	if (ret)
		return ret;

	return size;
}

static DEVICE_ATTR(size, 0444, rbd_size_show, NULL);
static DEVICE_ATTR(features, 0444, rbd_features_show, NULL);
static DEVICE_ATTR(major, 0444, rbd_major_show, NULL);
static DEVICE_ATTR(minor, 0444, rbd_minor_show, NULL);
static DEVICE_ATTR(client_addr, 0444, rbd_client_addr_show, NULL);
static DEVICE_ATTR(client_id, 0444, rbd_client_id_show, NULL);
static DEVICE_ATTR(cluster_fsid, 0444, rbd_cluster_fsid_show, NULL);
static DEVICE_ATTR(config_info, 0400, rbd_config_info_show, NULL);
static DEVICE_ATTR(pool, 0444, rbd_pool_show, NULL);
static DEVICE_ATTR(pool_id, 0444, rbd_pool_id_show, NULL);
static DEVICE_ATTR(pool_ns, 0444, rbd_pool_ns_show, NULL);
static DEVICE_ATTR(name, 0444, rbd_name_show, NULL);
static DEVICE_ATTR(image_id, 0444, rbd_image_id_show, NULL);
static DEVICE_ATTR(refresh, 0200, NULL, rbd_image_refresh);
static DEVICE_ATTR(current_snap, 0444, rbd_snap_show, NULL);
static DEVICE_ATTR(snap_id, 0444, rbd_snap_id_show, NULL);
static DEVICE_ATTR(parent, 0444, rbd_parent_show, NULL);

static struct attribute *rbd_attrs[] = {
	&dev_attr_size.attr,
	&dev_attr_features.attr,
	&dev_attr_major.attr,
	&dev_attr_minor.attr,
	&dev_attr_client_addr.attr,
	&dev_attr_client_id.attr,
	&dev_attr_cluster_fsid.attr,
	&dev_attr_config_info.attr,
	&dev_attr_pool.attr,
	&dev_attr_pool_id.attr,
	&dev_attr_pool_ns.attr,
	&dev_attr_name.attr,
	&dev_attr_image_id.attr,
	&dev_attr_current_snap.attr,
	&dev_attr_snap_id.attr,
	&dev_attr_parent.attr,
	&dev_attr_refresh.attr,
	NULL
};

static struct attribute_group rbd_attr_group = {
	.attrs = rbd_attrs,
};

static const struct attribute_group *rbd_attr_groups[] = {
	&rbd_attr_group,
	NULL
};

static void rbd_dev_release(struct device *dev);

static const struct device_type rbd_device_type = {
	.name		= "rbd",
	.groups		= rbd_attr_groups,
	.release	= rbd_dev_release,
};

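/*
 * An rbd_spec identifies an image or snapshot by pool, image and
 * snapshot.  It is reference counted because it can outlive the
 * code that created it (a parent spec, for example, is shared with
 * the parent rbd_device).
 */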
static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
{
	kref_get(&spec->kref);

	return spec;
}

static void rbd_spec_free(struct kref *kref);
static void rbd_spec_put(struct rbd_spec *spec)
{
	if (spec)
		kref_put(&spec->kref, rbd_spec_free);
}

static struct rbd_spec *rbd_spec_alloc(void)
{
	struct rbd_spec *spec;

	spec = kzalloc(sizeof (*spec), GFP_KERNEL);
	if (!spec)
		return NULL;

	spec->pool_id = CEPH_NOPOOL;
	spec->snap_id = CEPH_NOSNAP;
	kref_init(&spec->kref);

	return spec;
}

static void rbd_spec_free(struct kref *kref)
{
	struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);

	kfree(spec->pool_name);
	kfree(spec->pool_ns);
	kfree(spec->image_id);
	kfree(spec->image_name);
	kfree(spec->snap_name);
	kfree(spec);
}

static void rbd_dev_free(struct rbd_device *rbd_dev)
{
	WARN_ON(rbd_dev->watch_state != RBD_WATCH_STATE_UNREGISTERED);
	WARN_ON(rbd_dev->lock_state != RBD_LOCK_STATE_UNLOCKED);

	ceph_oid_destroy(&rbd_dev->header_oid);
	ceph_oloc_destroy(&rbd_dev->header_oloc);
	kfree(rbd_dev->config_info);

	rbd_put_client(rbd_dev->rbd_client);
	rbd_spec_put(rbd_dev->spec);
	kfree(rbd_dev->opts);
	kfree(rbd_dev);
}

static void rbd_dev_release(struct device *dev)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
	bool need_put = !!rbd_dev->opts;

	if (need_put) {
		destroy_workqueue(rbd_dev->task_wq);
		ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
	}

	rbd_dev_free(rbd_dev);

	/*
	 * This is racy, but way better than putting module outside of
	 * the release callback.  The race window is pretty small, so
	 * doing something similar to dm (dm-builtin.c) is overkill.
	 */
	if (need_put)
		module_put(THIS_MODULE);
}

static struct rbd_device *__rbd_dev_create(struct rbd_client *rbdc,
					   struct rbd_spec *spec)
{
	struct rbd_device *rbd_dev;

	rbd_dev = kzalloc(sizeof(*rbd_dev), GFP_KERNEL);
	if (!rbd_dev)
		return NULL;

	spin_lock_init(&rbd_dev->lock);
	INIT_LIST_HEAD(&rbd_dev->node);
	init_rwsem(&rbd_dev->header_rwsem);

	rbd_dev->header.data_pool_id = CEPH_NOPOOL;
	ceph_oid_init(&rbd_dev->header_oid);
	rbd_dev->header_oloc.pool = spec->pool_id;
	if (spec->pool_ns) {
		WARN_ON(!*spec->pool_ns);
		rbd_dev->header_oloc.pool_ns =
		    ceph_find_or_create_string(spec->pool_ns,
					       strlen(spec->pool_ns));
	}

	mutex_init(&rbd_dev->watch_mutex);
	rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED;
	INIT_DELAYED_WORK(&rbd_dev->watch_dwork, rbd_reregister_watch);

	init_rwsem(&rbd_dev->lock_rwsem);
	rbd_dev->lock_state = RBD_LOCK_STATE_UNLOCKED;
	INIT_WORK(&rbd_dev->acquired_lock_work, rbd_notify_acquired_lock);
	INIT_WORK(&rbd_dev->released_lock_work, rbd_notify_released_lock);
	INIT_DELAYED_WORK(&rbd_dev->lock_dwork, rbd_acquire_lock);
	INIT_WORK(&rbd_dev->unlock_work, rbd_release_lock_work);
	init_waitqueue_head(&rbd_dev->lock_waitq);

	rbd_dev->dev.bus = &rbd_bus_type;
	rbd_dev->dev.type = &rbd_device_type;
	rbd_dev->dev.parent = &rbd_root_dev;
	device_initialize(&rbd_dev->dev);

	rbd_dev->rbd_client = rbdc;
	rbd_dev->spec = spec;

	return rbd_dev;
}

/*
 * Create a mapping rbd_dev.
 */
static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
					 struct rbd_spec *spec,
					 struct rbd_options *opts)
{
	struct rbd_device *rbd_dev;

	rbd_dev = __rbd_dev_create(rbdc, spec);
	if (!rbd_dev)
		return NULL;

	rbd_dev->opts = opts;

	/* get an id and fill in device name */
	rbd_dev->dev_id = ida_simple_get(&rbd_dev_id_ida, 0,
					 minor_to_rbd_dev_id(1 << MINORBITS),
					 GFP_KERNEL);
	if (rbd_dev->dev_id < 0)
		goto fail_rbd_dev;

	sprintf(rbd_dev->name, RBD_DRV_NAME "%d", rbd_dev->dev_id);
	rbd_dev->task_wq = alloc_ordered_workqueue("%s-tasks", WQ_MEM_RECLAIM,
						   rbd_dev->name);
	if (!rbd_dev->task_wq)
		goto fail_dev_id;

	/* we have a ref from do_rbd_add() */
	__module_get(THIS_MODULE);

	dout("%s rbd_dev %p dev_id %d\n", __func__, rbd_dev, rbd_dev->dev_id);
	return rbd_dev;

fail_dev_id:
	ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
fail_rbd_dev:
	rbd_dev_free(rbd_dev);
	return NULL;
}

static void rbd_dev_destroy(struct rbd_device *rbd_dev)
{
	if (rbd_dev)
		put_device(&rbd_dev->dev);
}

/*
 * Get the size and object order for an image snapshot, or if
 * snap_id is CEPH_NOSNAP, gets this information for the base
 * image.
 */
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u8 *order, u64 *snap_size)
{
	__le64 snapid = cpu_to_le64(snap_id);
	int ret;
	struct {
		u8 order;
		__le64 size;
	} __attribute__ ((packed)) size_buf = { 0 };

	ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
				  &rbd_dev->header_oloc, "get_size",
				  &snapid, sizeof(snapid),
				  &size_buf, sizeof(size_buf));
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		return ret;
	if (ret < sizeof (size_buf))
		return -ERANGE;

	if (order) {
		*order = size_buf.order;
		dout("  order %u", (unsigned int)*order);
	}
	*snap_size = le64_to_cpu(size_buf.size);

	dout("  snap_id 0x%016llx snap_size = %llu\n",
		(unsigned long long)snap_id,
		(unsigned long long)*snap_size);

	return 0;
}

static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
{
	return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
					&rbd_dev->header.obj_order,
					&rbd_dev->header.image_size);
}

static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
{
	void *reply_buf;
	int ret;
	void *p;

	reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL);
	if (!reply_buf)
		return -ENOMEM;

	ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
				  &rbd_dev->header_oloc, "get_object_prefix",
				  NULL, 0, reply_buf, RBD_OBJ_PREFIX_LEN_MAX);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		goto out;

	p = reply_buf;
	rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
						p + ret, NULL, GFP_NOIO);
	ret = 0;

	if (IS_ERR(rbd_dev->header.object_prefix)) {
		ret = PTR_ERR(rbd_dev->header.object_prefix);
		rbd_dev->header.object_prefix = NULL;
	} else {
		dout("  object_prefix = %s\n", rbd_dev->header.object_prefix);
	}
out:
	kfree(reply_buf);

	return ret;
}

static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
		u64 *snap_features)
{
	__le64 snapid = cpu_to_le64(snap_id);
	struct {
		__le64 features;
		__le64 incompat;
	} __attribute__ ((packed)) features_buf = { 0 };
	u64 unsup;
	int ret;

	ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
				  &rbd_dev->header_oloc, "get_features",
				  &snapid, sizeof(snapid),
				  &features_buf, sizeof(features_buf));
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		return ret;
	if (ret < sizeof (features_buf))
		return -ERANGE;

	unsup = le64_to_cpu(features_buf.incompat) & ~RBD_FEATURES_SUPPORTED;
	if (unsup) {
		rbd_warn(rbd_dev, "image uses unsupported features: 0x%llx",
			 unsup);
		return -ENXIO;
	}

	*snap_features = le64_to_cpu(features_buf.features);

	dout("  snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
		(unsigned long long)snap_id,
		(unsigned long long)*snap_features,
		(unsigned long long)le64_to_cpu(features_buf.incompat));

	return 0;
}

static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
{
	return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
						&rbd_dev->header.features);
}

struct parent_image_info {
	u64		pool_id;
	const char	*pool_ns;
	const char	*image_id;
	u64		snap_id;

	bool		has_overlap;
	u64		overlap;
};

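/*
 * Parent information is retrieved with the "parent_get" and
 * "parent_overlap_get" class methods on newer OSDs.  If those
 * methods are not supported, __get_parent_info() returns 1 and
 * get_parent_info() falls back to the legacy "get_parent" method.
 */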
/*
 * The caller is responsible for @pii.
 */
static int decode_parent_image_spec(void **p, void *end,
				    struct parent_image_info *pii)
{
	u8 struct_v;
	u32 struct_len;
	int ret;

	ret = ceph_start_decoding(p, end, 1, "ParentImageSpec",
				  &struct_v, &struct_len);
	if (ret)
		return ret;

	ceph_decode_64_safe(p, end, pii->pool_id, e_inval);
	pii->pool_ns = ceph_extract_encoded_string(p, end, NULL, GFP_KERNEL);
	if (IS_ERR(pii->pool_ns)) {
		ret = PTR_ERR(pii->pool_ns);
		pii->pool_ns = NULL;
		return ret;
	}
	pii->image_id = ceph_extract_encoded_string(p, end, NULL, GFP_KERNEL);
	if (IS_ERR(pii->image_id)) {
		ret = PTR_ERR(pii->image_id);
		pii->image_id = NULL;
		return ret;
	}
	ceph_decode_64_safe(p, end, pii->snap_id, e_inval);
	return 0;

e_inval:
	return -EINVAL;
}

static int __get_parent_info(struct rbd_device *rbd_dev,
			     struct page *req_page,
			     struct page *reply_page,
			     struct parent_image_info *pii)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	size_t reply_len = PAGE_SIZE;
	void *p, *end;
	int ret;

	ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
			     "rbd", "parent_get", CEPH_OSD_FLAG_READ,
			     req_page, sizeof(u64), reply_page, &reply_len);
	if (ret)
		return ret == -EOPNOTSUPP ? 1 : ret;

	p = page_address(reply_page);
	end = p + reply_len;
	ret = decode_parent_image_spec(&p, end, pii);
	if (ret)
		return ret;

	ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
			     "rbd", "parent_overlap_get", CEPH_OSD_FLAG_READ,
			     req_page, sizeof(u64), reply_page, &reply_len);
	if (ret)
		return ret;

	p = page_address(reply_page);
	end = p + reply_len;
	ceph_decode_8_safe(&p, end, pii->has_overlap, e_inval);
	if (pii->has_overlap)
		ceph_decode_64_safe(&p, end, pii->overlap, e_inval);

	return 0;

e_inval:
	return -EINVAL;
}

/*
 * The caller is responsible for @pii.
 */
static int __get_parent_info_legacy(struct rbd_device *rbd_dev,
				    struct page *req_page,
				    struct page *reply_page,
				    struct parent_image_info *pii)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	size_t reply_len = PAGE_SIZE;
	void *p, *end;
	int ret;

	ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
			     "rbd", "get_parent", CEPH_OSD_FLAG_READ,
			     req_page, sizeof(u64), reply_page, &reply_len);
	if (ret)
		return ret;

	p = page_address(reply_page);
	end = p + reply_len;
	ceph_decode_64_safe(&p, end, pii->pool_id, e_inval);
	pii->image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
	if (IS_ERR(pii->image_id)) {
		ret = PTR_ERR(pii->image_id);
		pii->image_id = NULL;
		return ret;
	}
	ceph_decode_64_safe(&p, end, pii->snap_id, e_inval);
	pii->has_overlap = true;
	ceph_decode_64_safe(&p, end, pii->overlap, e_inval);

	return 0;

e_inval:
	return -EINVAL;
}

static int get_parent_info(struct rbd_device *rbd_dev,
			   struct parent_image_info *pii)
{
	struct page *req_page, *reply_page;
	void *p;
	int ret;

	req_page = alloc_page(GFP_KERNEL);
	if (!req_page)
		return -ENOMEM;

	reply_page = alloc_page(GFP_KERNEL);
	if (!reply_page) {
		__free_page(req_page);
		return -ENOMEM;
	}

	p = page_address(req_page);
	ceph_encode_64(&p, rbd_dev->spec->snap_id);
	ret = __get_parent_info(rbd_dev, req_page, reply_page, pii);
	if (ret > 0)
		ret = __get_parent_info_legacy(rbd_dev, req_page, reply_page,
					       pii);

	__free_page(req_page);
	__free_page(reply_page);
	return ret;
}

static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
{
	struct rbd_spec *parent_spec;
	struct parent_image_info pii = { 0 };
	int ret;

	parent_spec = rbd_spec_alloc();
	if (!parent_spec)
		return -ENOMEM;

	ret = get_parent_info(rbd_dev, &pii);
	if (ret)
		goto out_err;

	dout("%s pool_id %llu pool_ns %s image_id %s snap_id %llu has_overlap %d overlap %llu\n",
	     __func__, pii.pool_id, pii.pool_ns, pii.image_id, pii.snap_id,
	     pii.has_overlap, pii.overlap);

	if (pii.pool_id == CEPH_NOPOOL || !pii.has_overlap) {
		/*
		 * Either the parent never existed, or we have a
		 * record of it but the image got flattened so it no
		 * longer has a parent.  When the parent of a
		 * layered image disappears we immediately set the
		 * overlap to 0.  The effect of this is that all new
		 * requests will be treated as if the image had no
		 * parent.
		 *
		 * If !pii.has_overlap, the parent image spec is not
		 * applicable.  It's there to avoid duplication in each
		 * snapshot record.
		 */
		if (rbd_dev->parent_overlap) {
			rbd_dev->parent_overlap = 0;
			rbd_dev_parent_put(rbd_dev);
			pr_info("%s: clone image has been flattened\n",
				rbd_dev->disk->disk_name);
		}

		goto out;	/* No parent?  No problem. */
	}

	/* The ceph file layout needs to fit pool id in 32 bits */

	ret = -EIO;
	if (pii.pool_id > (u64)U32_MAX) {
		rbd_warn(NULL, "parent pool id too large (%llu > %u)",
			(unsigned long long)pii.pool_id, U32_MAX);
		goto out_err;
	}

	/*
	 * The parent won't change (except when the clone is
	 * flattened, which was handled above).  So we only need
	 * to record the parent spec if we have not already done so.
	 */
	if (!rbd_dev->parent_spec) {
		parent_spec->pool_id = pii.pool_id;
		if (pii.pool_ns && *pii.pool_ns) {
			parent_spec->pool_ns = pii.pool_ns;
			pii.pool_ns = NULL;
		}
		parent_spec->image_id = pii.image_id;
		pii.image_id = NULL;
		parent_spec->snap_id = pii.snap_id;

		rbd_dev->parent_spec = parent_spec;
		parent_spec = NULL;	/* rbd_dev now owns this */
	}

	/*
	 * We always update the parent overlap.  If it's zero we issue
	 * a warning, as we will proceed as if there was no parent.
	 */
	if (!pii.overlap) {
		if (parent_spec) {
			/* refresh, careful to warn just once */
			if (rbd_dev->parent_overlap)
				rbd_warn(rbd_dev,
				    "clone now standalone (overlap became 0)");
		} else {
			/* initial probe */
			rbd_warn(rbd_dev, "clone is standalone (overlap 0)");
		}
	}
	rbd_dev->parent_overlap = pii.overlap;

out:
	ret = 0;
out_err:
	kfree(pii.pool_ns);
	kfree(pii.image_id);
	rbd_spec_put(parent_spec);
	return ret;
}

static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
{
	struct {
		__le64 stripe_unit;
		__le64 stripe_count;
	} __attribute__ ((packed)) striping_info_buf = { 0 };
	size_t size = sizeof (striping_info_buf);
	void *p;
	int ret;

	ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
				&rbd_dev->header_oloc, "get_stripe_unit_count",
				NULL, 0, &striping_info_buf, size);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		return ret;
	if (ret < size)
		return -ERANGE;

	p = &striping_info_buf;
	rbd_dev->header.stripe_unit = ceph_decode_64(&p);
	rbd_dev->header.stripe_count = ceph_decode_64(&p);
	return 0;
}

static int rbd_dev_v2_data_pool(struct rbd_device *rbd_dev)
{
	__le64 data_pool_id;
	int ret;

	ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
				  &rbd_dev->header_oloc, "get_data_pool",
				  NULL, 0, &data_pool_id, sizeof(data_pool_id));
	if (ret < 0)
		return ret;
	if (ret < sizeof(data_pool_id))
		return -EBADMSG;

	rbd_dev->header.data_pool_id = le64_to_cpu(data_pool_id);
	WARN_ON(rbd_dev->header.data_pool_id == CEPH_NOPOOL);
	return 0;
}

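/*
 * Look up the image name for rbd_dev's image id by calling the
 * "dir_get_name" class method on the pool's rbd_directory object.
 * Returns a dynamically-allocated name, or NULL if it couldn't be
 * obtained (callers tolerate that -- see rbd_spec_fill_names()).
 */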
static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
{
	CEPH_DEFINE_OID_ONSTACK(oid);
	size_t image_id_size;
	char *image_id;
	void *p;
	void *end;
	size_t size;
	void *reply_buf = NULL;
	size_t len = 0;
	char *image_name = NULL;
	int ret;

	rbd_assert(!rbd_dev->spec->image_name);

	len = strlen(rbd_dev->spec->image_id);
	image_id_size = sizeof (__le32) + len;
	image_id = kmalloc(image_id_size, GFP_KERNEL);
	if (!image_id)
		return NULL;

	p = image_id;
	end = image_id + image_id_size;
	ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);

	size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
	reply_buf = kmalloc(size, GFP_KERNEL);
	if (!reply_buf)
		goto out;

	ceph_oid_printf(&oid, "%s", RBD_DIRECTORY);
	ret = rbd_obj_method_sync(rbd_dev, &oid, &rbd_dev->header_oloc,
				  "dir_get_name", image_id, image_id_size,
				  reply_buf, size);
	if (ret < 0)
		goto out;
	p = reply_buf;
	end = reply_buf + ret;

	image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
	if (IS_ERR(image_name))
		image_name = NULL;
	else
		dout("%s: name is %s len is %zd\n", __func__, image_name, len);
out:
	kfree(reply_buf);
	kfree(image_id);

	return image_name;
}

static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	const char *snap_name;
	u32 which = 0;

	/* Skip over names until we find the one we are looking for */

	snap_name = rbd_dev->header.snap_names;
	while (which < snapc->num_snaps) {
		if (!strcmp(name, snap_name))
			return snapc->snaps[which];
		snap_name += strlen(snap_name) + 1;
		which++;
	}
	return CEPH_NOSNAP;
}

static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	u32 which;
	bool found = false;
	u64 snap_id;

	for (which = 0; !found && which < snapc->num_snaps; which++) {
		const char *snap_name;

		snap_id = snapc->snaps[which];
		snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
		if (IS_ERR(snap_name)) {
			/* ignore no-longer existing snapshots */
			if (PTR_ERR(snap_name) == -ENOENT)
				continue;
			else
				break;
		}
		found = !strcmp(name, snap_name);
		kfree(snap_name);
	}
	return found ? snap_id : CEPH_NOSNAP;
}

/*
 * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if
 * no snapshot by that name is found, or if an error occurs.
 */
static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
{
	if (rbd_dev->image_format == 1)
		return rbd_v1_snap_id_by_name(rbd_dev, name);

	return rbd_v2_snap_id_by_name(rbd_dev, name);
}

/*
 * An image being mapped will have everything but the snap id.
 */
static int rbd_spec_fill_snap_id(struct rbd_device *rbd_dev)
{
	struct rbd_spec *spec = rbd_dev->spec;

	rbd_assert(spec->pool_id != CEPH_NOPOOL && spec->pool_name);
	rbd_assert(spec->image_id && spec->image_name);
	rbd_assert(spec->snap_name);

	if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
		u64 snap_id;

		snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
		if (snap_id == CEPH_NOSNAP)
			return -ENOENT;

		spec->snap_id = snap_id;
	} else {
		spec->snap_id = CEPH_NOSNAP;
	}

	return 0;
}

/*
 * A parent image will have all ids but none of the names.
 *
 * All names in an rbd spec are dynamically allocated.  It's OK if we
 * can't figure out the name for an image id.
 */
static int rbd_spec_fill_names(struct rbd_device *rbd_dev)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct rbd_spec *spec = rbd_dev->spec;
	const char *pool_name;
	const char *image_name;
	const char *snap_name;
	int ret;

	rbd_assert(spec->pool_id != CEPH_NOPOOL);
	rbd_assert(spec->image_id);
	rbd_assert(spec->snap_id != CEPH_NOSNAP);

	/* Get the pool name; we have to make our own copy of this */

	pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
	if (!pool_name) {
		rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
		return -EIO;
	}
	pool_name = kstrdup(pool_name, GFP_KERNEL);
	if (!pool_name)
		return -ENOMEM;

	/* Fetch the image name; tolerate failure here */

	image_name = rbd_dev_image_name(rbd_dev);
	if (!image_name)
		rbd_warn(rbd_dev, "unable to get image name");

	/* Fetch the snapshot name */

	snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
	if (IS_ERR(snap_name)) {
		ret = PTR_ERR(snap_name);
		goto out_err;
	}

	spec->pool_name = pool_name;
	spec->image_name = image_name;
	spec->snap_name = snap_name;

	return 0;

out_err:
	kfree(image_name);
	kfree(pool_name);
	return ret;
}

static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
{
	size_t size;
	int ret;
	void *reply_buf;
	void *p;
	void *end;
	u64 seq;
	u32 snap_count;
	struct ceph_snap_context *snapc;
	u32 i;

	/*
	 * We'll need room for the seq value (maximum snapshot id),
	 * snapshot count, and array of that many snapshot ids.
	 * For now we have a fixed upper limit on the number we're
	 * prepared to receive.
	 */
	size = sizeof (__le64) + sizeof (__le32) +
			RBD_MAX_SNAP_COUNT * sizeof (__le64);
	reply_buf = kzalloc(size, GFP_KERNEL);
	if (!reply_buf)
		return -ENOMEM;

	ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
				  &rbd_dev->header_oloc, "get_snapcontext",
				  NULL, 0, reply_buf, size);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		goto out;

	p = reply_buf;
	end = reply_buf + ret;
	ret = -ERANGE;
	ceph_decode_64_safe(&p, end, seq, out);
	ceph_decode_32_safe(&p, end, snap_count, out);

	/*
	 * Make sure the reported number of snapshot ids wouldn't go
	 * beyond the end of our buffer.  But before checking that,
	 * make sure the computed size of the snapshot context we
	 * allocate is representable in a size_t.
	 */
	if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
				 / sizeof (u64)) {
		ret = -EINVAL;
		goto out;
	}
	if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
		goto out;
	ret = 0;

	snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
	if (!snapc) {
		ret = -ENOMEM;
		goto out;
	}
	snapc->seq = seq;
	for (i = 0; i < snap_count; i++)
		snapc->snaps[i] = ceph_decode_64(&p);

	ceph_put_snap_context(rbd_dev->header.snapc);
	rbd_dev->header.snapc = snapc;

	dout("  snap context seq = %llu, snap_count = %u\n",
		(unsigned long long)seq, (unsigned int)snap_count);
out:
	kfree(reply_buf);

	return ret;
}

static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id)
{
	size_t size;
	void *reply_buf;
	__le64 snapid;
	int ret;
	void *p;
	void *end;
	char *snap_name;

	size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
	reply_buf = kmalloc(size, GFP_KERNEL);
	if (!reply_buf)
		return ERR_PTR(-ENOMEM);

	snapid = cpu_to_le64(snap_id);
	ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
				  &rbd_dev->header_oloc, "get_snapshot_name",
				  &snapid, sizeof(snapid), reply_buf, size);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0) {
		snap_name = ERR_PTR(ret);
		goto out;
	}

	p = reply_buf;
	end = reply_buf + ret;
	snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
	if (IS_ERR(snap_name))
		goto out;

	dout("  snap_id 0x%016llx snap_name = %s\n",
		(unsigned long long)snap_id, snap_name);
out:
	kfree(reply_buf);

	return snap_name;
}

5180 static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
5181 {
5182 	bool first_time = rbd_dev->header.object_prefix == NULL;
5183 	int ret;
5184 
5185 	ret = rbd_dev_v2_image_size(rbd_dev);
5186 	if (ret)
5187 		return ret;
5188 
5189 	if (first_time) {
5190 		ret = rbd_dev_v2_header_onetime(rbd_dev);
5191 		if (ret)
5192 			return ret;
5193 	}
5194 
5195 	ret = rbd_dev_v2_snap_context(rbd_dev);
5196 	if (ret && first_time) {
5197 		kfree(rbd_dev->header.object_prefix);
5198 		rbd_dev->header.object_prefix = NULL;
5199 	}
5200 
5201 	return ret;
5202 }
5203 
5204 static int rbd_dev_header_info(struct rbd_device *rbd_dev)
5205 {
5206 	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
5207 
5208 	if (rbd_dev->image_format == 1)
5209 		return rbd_dev_v1_header_info(rbd_dev);
5210 
5211 	return rbd_dev_v2_header_info(rbd_dev);
5212 }
5213 
5214 /*
5215  * Skips over white space at *buf, and updates *buf to point to the
5216  * first found non-space character (if any). Returns the length of
5217  * the token (string of non-white space characters) found.  Note
5218  * that *buf must be terminated with '\0'.
5219  */
5220 static inline size_t next_token(const char **buf)
5221 {
5222 	/*
5223 	 * These are the characters that produce nonzero for
5224 	 * isspace() in the "C" and "POSIX" locales.
5225 	 */
5226 	const char *spaces = " \f\n\r\t\v";
5227 
5228 	*buf += strspn(*buf, spaces);	/* Find start of token */
5229 
5230 	return strcspn(*buf, spaces);	/* Return token length */
5231 }
5232 
5233 /*
5234  * Finds the next token in *buf, dynamically allocates a buffer big
5235  * enough to hold a copy of it, and copies the token into the new
5236  * buffer.  The copy is guaranteed to be terminated with '\0'.  Note
5237  * that a duplicate buffer is created even for a zero-length token.
5238  *
5239  * Returns a pointer to the newly-allocated duplicate, or a null
5240  * pointer if memory for the duplicate was not available.  If
5241  * the lenp argument is a non-null pointer, the length of the token
5242  * (not including the '\0') is returned in *lenp.
5243  *
5244  * If successful, the *buf pointer will be updated to point beyond
5245  * the end of the found token.
5246  *
5247  * Note: uses GFP_KERNEL for allocation.
5248  */
5249 static inline char *dup_token(const char **buf, size_t *lenp)
5250 {
5251 	char *dup;
5252 	size_t len;
5253 
5254 	len = next_token(buf);
5255 	dup = kmemdup(*buf, len + 1, GFP_KERNEL);
5256 	if (!dup)
5257 		return NULL;
5258 	*(dup + len) = '\0';
5259 	*buf += len;
5260 
5261 	if (lenp)
5262 		*lenp = len;
5263 
5264 	return dup;
5265 }
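
/*
 * Editorial example of the two helpers above: with *buf pointing at
 * "  rbd foo", next_token() advances *buf past the leading spaces and
 * returns 3 (the length of "rbd"); dup_token() additionally advances
 * *buf to " foo" and returns a newly allocated copy of "rbd".
 */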
5266 
5267 /*
5268  * Parse the options provided for an "rbd add" (i.e., rbd image
5269  * mapping) request.  These arrive via a write to /sys/bus/rbd/add,
5270  * and the data written is passed here via a NUL-terminated buffer.
5271  * Returns 0 if successful or an error code otherwise.
5272  *
5273  * The information extracted from these options is recorded in
5274  * the other parameters which return dynamically-allocated
5275  * structures:
5276  *  ceph_opts
5277  *      The address of a pointer that will refer to a ceph options
5278  *      structure.  Caller must release the returned pointer using
5279  *      ceph_destroy_options() when it is no longer needed.
5280  *  rbd_opts
5281  *	Address of an rbd options pointer.  Fully initialized by
5282  *	this function; caller must release with kfree().
5283  *  spec
5284  *	Address of an rbd image specification pointer.  Fully
5285  *	initialized by this function based on parsed options.
5286  *	Caller must release with rbd_spec_put().
5287  *
5288  * The options passed take this form:
5289  *  <mon_addrs> <options> <pool_name> <image_name> [<snap_name>]
5290  * where:
5291  *  <mon_addrs>
5292  *      A comma-separated list of one or more monitor addresses.
5293  *      A monitor address is an ip address, optionally followed
5294  *      by a port number (separated by a colon).
5295  *        I.e.:  ip1[:port1][,ip2[:port2]...]
5296  *  <options>
5297  *      A comma-separated list of ceph and/or rbd options.
5298  *  <pool_name>
5299  *      The name of the rados pool containing the rbd image.
5300  *  <image_name>
5301  *      The name of the image in that pool to map.
5302  *  <snap_name>
5303  *      An optional snapshot name.  If provided, the mapping will
5304  *      present data from the image as of the time that snapshot
5305  *      was created.  The image head is used if no snapshot name is
5306  *      provided.  Snapshot mappings are always read-only.
5307  */
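/*
 * Illustrative example (all values invented): the head of image "foo"
 * in pool "rbd" could be mapped with
 *
 *   echo "1.2.3.4:6789 name=admin,secret=<key> rbd foo" \
 *       > /sys/bus/rbd/add
 *
 * See Documentation/ABI/testing/sysfs-bus-rbd for details.
 */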
5308 static int rbd_add_parse_args(const char *buf,
5309 				struct ceph_options **ceph_opts,
5310 				struct rbd_options **opts,
5311 				struct rbd_spec **rbd_spec)
5312 {
5313 	size_t len;
5314 	char *options;
5315 	const char *mon_addrs;
5316 	char *snap_name;
5317 	size_t mon_addrs_size;
5318 	struct parse_rbd_opts_ctx pctx = { 0 };
5319 	struct ceph_options *copts;
5320 	int ret;
5321 
5322 	/* The first four tokens are required */
5323 
5324 	len = next_token(&buf);
5325 	if (!len) {
5326 		rbd_warn(NULL, "no monitor address(es) provided");
5327 		return -EINVAL;
5328 	}
5329 	mon_addrs = buf;
5330 	mon_addrs_size = len + 1;
5331 	buf += len;
5332 
5333 	ret = -EINVAL;
5334 	options = dup_token(&buf, NULL);
5335 	if (!options)
5336 		return -ENOMEM;
5337 	if (!*options) {
5338 		rbd_warn(NULL, "no options provided");
5339 		goto out_err;
5340 	}
5341 
5342 	pctx.spec = rbd_spec_alloc();
5343 	if (!pctx.spec)
5344 		goto out_mem;
5345 
5346 	pctx.spec->pool_name = dup_token(&buf, NULL);
5347 	if (!pctx.spec->pool_name)
5348 		goto out_mem;
5349 	if (!*pctx.spec->pool_name) {
5350 		rbd_warn(NULL, "no pool name provided");
5351 		goto out_err;
5352 	}
5353 
5354 	pctx.spec->image_name = dup_token(&buf, NULL);
5355 	if (!pctx.spec->image_name)
5356 		goto out_mem;
5357 	if (!*pctx.spec->image_name) {
5358 		rbd_warn(NULL, "no image name provided");
5359 		goto out_err;
5360 	}
5361 
5362 	/*
5363 	 * Snapshot name is optional; default is to use "-"
5364 	 * (indicating the head/no snapshot).
5365 	 */
5366 	len = next_token(&buf);
5367 	if (!len) {
5368 		buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
5369 		len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
5370 	} else if (len > RBD_MAX_SNAP_NAME_LEN) {
5371 		ret = -ENAMETOOLONG;
5372 		goto out_err;
5373 	}
5374 	snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
5375 	if (!snap_name)
5376 		goto out_mem;
5377 	*(snap_name + len) = '\0';
5378 	pctx.spec->snap_name = snap_name;
5379 
5380 	/* Initialize all rbd options to the defaults */
5381 
5382 	pctx.opts = kzalloc(sizeof(*pctx.opts), GFP_KERNEL);
5383 	if (!pctx.opts)
5384 		goto out_mem;
5385 
5386 	pctx.opts->read_only = RBD_READ_ONLY_DEFAULT;
5387 	pctx.opts->queue_depth = RBD_QUEUE_DEPTH_DEFAULT;
5388 	pctx.opts->lock_timeout = RBD_LOCK_TIMEOUT_DEFAULT;
5389 	pctx.opts->lock_on_read = RBD_LOCK_ON_READ_DEFAULT;
5390 	pctx.opts->exclusive = RBD_EXCLUSIVE_DEFAULT;
5391 	pctx.opts->trim = RBD_TRIM_DEFAULT;
5392 
5393 	copts = ceph_parse_options(options, mon_addrs,
5394 				   mon_addrs + mon_addrs_size - 1,
5395 				   parse_rbd_opts_token, &pctx);
5396 	if (IS_ERR(copts)) {
5397 		ret = PTR_ERR(copts);
5398 		goto out_err;
5399 	}
5400 	kfree(options);
5401 
5402 	*ceph_opts = copts;
5403 	*opts = pctx.opts;
5404 	*rbd_spec = pctx.spec;
5405 
5406 	return 0;
5407 out_mem:
5408 	ret = -ENOMEM;
5409 out_err:
5410 	kfree(pctx.opts);
5411 	rbd_spec_put(pctx.spec);
5412 	kfree(options);
5413 
5414 	return ret;
5415 }
5416 
5417 static void rbd_dev_image_unlock(struct rbd_device *rbd_dev)
5418 {
5419 	down_write(&rbd_dev->lock_rwsem);
5420 	if (__rbd_is_lock_owner(rbd_dev))
5421 		rbd_unlock(rbd_dev);
5422 	up_write(&rbd_dev->lock_rwsem);
5423 }
5424 
5425 static int rbd_add_acquire_lock(struct rbd_device *rbd_dev)
5426 {
5427 	int ret;
5428 
5429 	if (!(rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK)) {
5430 		rbd_warn(rbd_dev, "exclusive-lock feature is not enabled");
5431 		return -EINVAL;
5432 	}
5433 
5434 	/* FIXME: "rbd map --exclusive" should be interruptible */
5435 	down_read(&rbd_dev->lock_rwsem);
5436 	ret = rbd_wait_state_locked(rbd_dev, true);
5437 	up_read(&rbd_dev->lock_rwsem);
5438 	if (ret) {
5439 		rbd_warn(rbd_dev, "failed to acquire exclusive lock");
5440 		return -EROFS;
5441 	}
5442 
5443 	return 0;
5444 }
5445 
5446 /*
5447  * An rbd format 2 image has a unique identifier, distinct from the
5448  * name given to it by the user.  Internally, that identifier is
5449  * what's used to specify the names of objects related to the image.
5450  *
5451  * A special "rbd id" object is used to map an rbd image name to its
5452  * id.  If that object doesn't exist, then there is no v2 rbd image
5453  * with the supplied name.
5454  *
5455  * This function will record the given rbd_dev's image_id field if
5456  * it can be determined, and in that case will return 0.  If any
5457  * errors occur a negative errno will be returned and the rbd_dev's
5458  * image_id field will be unchanged (and should be NULL).
5459  */
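/*
 * Illustratively, assuming the usual RBD_ID_PREFIX of "rbd_id.", a
 * format 2 image named "foo" has its id stored in an object named
 * "rbd_id.foo"; the "get_id" class method below returns that id.
 */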
5460 static int rbd_dev_image_id(struct rbd_device *rbd_dev)
5461 {
5462 	int ret;
5463 	size_t size;
5464 	CEPH_DEFINE_OID_ONSTACK(oid);
5465 	void *response;
5466 	char *image_id;
5467 
5468 	/*
5469 	 * When probing a parent image, the image id is already
5470 	 * known (and the image name likely is not).  There's no
5471 	 * need to fetch the image id again in this case.  We
5472 	 * do still need to set the image format though.
5473 	 */
5474 	if (rbd_dev->spec->image_id) {
5475 		rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;
5476 
5477 		return 0;
5478 	}
5479 
5480 	/*
5481 	 * First, see if the format 2 image id file exists, and if
5482 	 * so, get the image's persistent id from it.
5483 	 */
5484 	ret = ceph_oid_aprintf(&oid, GFP_KERNEL, "%s%s", RBD_ID_PREFIX,
5485 			       rbd_dev->spec->image_name);
5486 	if (ret)
5487 		return ret;
5488 
5489 	dout("rbd id object name is %s\n", oid.name);
5490 
5491 	/* Response will be an encoded string, which includes a length */
5492 
5493 	size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
5494 	response = kzalloc(size, GFP_NOIO);
5495 	if (!response) {
5496 		ret = -ENOMEM;
5497 		goto out;
5498 	}
5499 
5500 	/* If it doesn't exist we'll assume it's a format 1 image */
5501 
5502 	ret = rbd_obj_method_sync(rbd_dev, &oid, &rbd_dev->header_oloc,
5503 				  "get_id", NULL, 0,
5504 				  response, RBD_IMAGE_ID_LEN_MAX);
5505 	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5506 	if (ret == -ENOENT) {
5507 		image_id = kstrdup("", GFP_KERNEL);
5508 		ret = image_id ? 0 : -ENOMEM;
5509 		if (!ret)
5510 			rbd_dev->image_format = 1;
5511 	} else if (ret >= 0) {
5512 		void *p = response;
5513 
5514 		image_id = ceph_extract_encoded_string(&p, p + ret,
5515 						NULL, GFP_NOIO);
5516 		ret = PTR_ERR_OR_ZERO(image_id);
5517 		if (!ret)
5518 			rbd_dev->image_format = 2;
5519 	}
5520 
5521 	if (!ret) {
5522 		rbd_dev->spec->image_id = image_id;
5523 		dout("image_id is %s\n", image_id);
5524 	}
5525 out:
5526 	kfree(response);
5527 	ceph_oid_destroy(&oid);
5528 	return ret;
5529 }
5530 
5531 /*
5532  * Undo whatever state changes are made by v1 or v2 header info
5533  * call.
5534  */
5535 static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
5536 {
5537 	struct rbd_image_header	*header;
5538 
5539 	rbd_dev_parent_put(rbd_dev);
5540 
5541 	/* Free dynamic fields from the header, then zero it out */
5542 
5543 	header = &rbd_dev->header;
5544 	ceph_put_snap_context(header->snapc);
5545 	kfree(header->snap_sizes);
5546 	kfree(header->snap_names);
5547 	kfree(header->object_prefix);
5548 	memset(header, 0, sizeof (*header));
5549 }
5550 
5551 static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
5552 {
5553 	int ret;
5554 
5555 	ret = rbd_dev_v2_object_prefix(rbd_dev);
5556 	if (ret)
5557 		goto out_err;
5558 
5559 	/*
5560 	 * Get and check the features for the image.  Currently the
5561 	 * features are assumed to never change.
5562 	 */
5563 	ret = rbd_dev_v2_features(rbd_dev);
5564 	if (ret)
5565 		goto out_err;
5566 
5567 	/* If the image supports fancy striping, get its parameters */
5568 
5569 	if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
5570 		ret = rbd_dev_v2_striping_info(rbd_dev);
5571 		if (ret < 0)
5572 			goto out_err;
5573 	}
5574 
5575 	if (rbd_dev->header.features & RBD_FEATURE_DATA_POOL) {
5576 		ret = rbd_dev_v2_data_pool(rbd_dev);
5577 		if (ret)
5578 			goto out_err;
5579 	}
5580 
5581 	rbd_init_layout(rbd_dev);
5582 	return 0;
5583 
5584 out_err:
5585 	rbd_dev->header.features = 0;
5586 	kfree(rbd_dev->header.object_prefix);
5587 	rbd_dev->header.object_prefix = NULL;
5588 	return ret;
5589 }
5590 
5591 /*
5592  * @depth is rbd_dev_image_probe() -> rbd_dev_probe_parent() ->
5593  * rbd_dev_image_probe() recursion depth, which means it's also the
5594  * length of the already discovered part of the parent chain.
5595  */
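/*
 * For example (editorial note), mapping a clone of a clone probes
 * three images in all; the probe fails with -EINVAL once the chain
 * would exceed RBD_MAX_PARENT_CHAIN_LEN images.
 */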
5596 static int rbd_dev_probe_parent(struct rbd_device *rbd_dev, int depth)
5597 {
5598 	struct rbd_device *parent = NULL;
5599 	int ret;
5600 
5601 	if (!rbd_dev->parent_spec)
5602 		return 0;
5603 
5604 	if (++depth > RBD_MAX_PARENT_CHAIN_LEN) {
5605 		pr_info("parent chain is too long (%d)\n", depth);
5606 		ret = -EINVAL;
5607 		goto out_err;
5608 	}
5609 
5610 	parent = __rbd_dev_create(rbd_dev->rbd_client, rbd_dev->parent_spec);
5611 	if (!parent) {
5612 		ret = -ENOMEM;
5613 		goto out_err;
5614 	}
5615 
5616 	/*
5617 	 * Images related by parent/child relationships always share
5618 	 * rbd_client and spec/parent_spec, so bump their refcounts.
5619 	 */
5620 	__rbd_get_client(rbd_dev->rbd_client);
5621 	rbd_spec_get(rbd_dev->parent_spec);
5622 
5623 	ret = rbd_dev_image_probe(parent, depth);
5624 	if (ret < 0)
5625 		goto out_err;
5626 
5627 	rbd_dev->parent = parent;
5628 	atomic_set(&rbd_dev->parent_ref, 1);
5629 	return 0;
5630 
5631 out_err:
5632 	rbd_dev_unparent(rbd_dev);
5633 	rbd_dev_destroy(parent);
5634 	return ret;
5635 }
5636 
5637 static void rbd_dev_device_release(struct rbd_device *rbd_dev)
5638 {
5639 	clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
5640 	rbd_dev_mapping_clear(rbd_dev);
5641 	rbd_free_disk(rbd_dev);
5642 	if (!single_major)
5643 		unregister_blkdev(rbd_dev->major, rbd_dev->name);
5644 }
5645 
5646 /*
5647  * rbd_dev->header_rwsem must be locked for write and will be unlocked
5648  * upon return.
5649  */
5650 static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
5651 {
5652 	int ret;
5653 
5654 	/* Record our major and minor device numbers. */
5655 
5656 	if (!single_major) {
5657 		ret = register_blkdev(0, rbd_dev->name);
5658 		if (ret < 0)
5659 			goto err_out_unlock;
5660 
5661 		rbd_dev->major = ret;
5662 		rbd_dev->minor = 0;
5663 	} else {
5664 		rbd_dev->major = rbd_major;
5665 		rbd_dev->minor = rbd_dev_id_to_minor(rbd_dev->dev_id);
5666 	}
5667 
5668 	/* Set up the blkdev mapping. */
5669 
5670 	ret = rbd_init_disk(rbd_dev);
5671 	if (ret)
5672 		goto err_out_blkdev;
5673 
5674 	ret = rbd_dev_mapping_set(rbd_dev);
5675 	if (ret)
5676 		goto err_out_disk;
5677 
5678 	set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
5679 	set_disk_ro(rbd_dev->disk, rbd_dev->opts->read_only);
5680 
5681 	ret = dev_set_name(&rbd_dev->dev, "%d", rbd_dev->dev_id);
5682 	if (ret)
5683 		goto err_out_mapping;
5684 
5685 	set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
5686 	up_write(&rbd_dev->header_rwsem);
5687 	return 0;
5688 
5689 err_out_mapping:
5690 	rbd_dev_mapping_clear(rbd_dev);
5691 err_out_disk:
5692 	rbd_free_disk(rbd_dev);
5693 err_out_blkdev:
5694 	if (!single_major)
5695 		unregister_blkdev(rbd_dev->major, rbd_dev->name);
5696 err_out_unlock:
5697 	up_write(&rbd_dev->header_rwsem);
5698 	return ret;
5699 }
5700 
5701 static int rbd_dev_header_name(struct rbd_device *rbd_dev)
5702 {
5703 	struct rbd_spec *spec = rbd_dev->spec;
5704 	int ret;
5705 
5706 	/* Record the header object name for this rbd image. */
5707 
5708 	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
5709 	if (rbd_dev->image_format == 1)
5710 		ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
5711 				       spec->image_name, RBD_SUFFIX);
5712 	else
5713 		ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
5714 				       RBD_HEADER_PREFIX, spec->image_id);
5715 
5716 	return ret;
5717 }
5718 
5719 static void rbd_dev_image_release(struct rbd_device *rbd_dev)
5720 {
5721 	rbd_dev_unprobe(rbd_dev);
5722 	if (rbd_dev->opts)
5723 		rbd_unregister_watch(rbd_dev);
5724 	rbd_dev->image_format = 0;
5725 	kfree(rbd_dev->spec->image_id);
5726 	rbd_dev->spec->image_id = NULL;
5727 }
5728 
5729 /*
5730  * Probe for the existence of the header object for the given rbd
5731  * device.  If this image is the one being mapped (i.e., not a
5732  * parent), initiate a watch on its header object before using that
5733  * object to get detailed information about the rbd image.
5734  */
5735 static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
5736 {
5737 	int ret;
5738 
5739 	/*
5740 	 * Get the id from the image id object.  Unless there's an
5741 	 * error, rbd_dev->spec->image_id will be filled in with
5742 	 * a dynamically-allocated string, and rbd_dev->image_format
5743 	 * will be set to either 1 or 2.
5744 	 */
5745 	ret = rbd_dev_image_id(rbd_dev);
5746 	if (ret)
5747 		return ret;
5748 
5749 	ret = rbd_dev_header_name(rbd_dev);
5750 	if (ret)
5751 		goto err_out_format;
5752 
5753 	if (!depth) {
5754 		ret = rbd_register_watch(rbd_dev);
5755 		if (ret) {
5756 			if (ret == -ENOENT)
5757 				pr_info("image %s/%s%s%s does not exist\n",
5758 					rbd_dev->spec->pool_name,
5759 					rbd_dev->spec->pool_ns ?: "",
5760 					rbd_dev->spec->pool_ns ? "/" : "",
5761 					rbd_dev->spec->image_name);
5762 			goto err_out_format;
5763 		}
5764 	}
5765 
5766 	ret = rbd_dev_header_info(rbd_dev);
5767 	if (ret)
5768 		goto err_out_watch;
5769 
5770 	/*
5771 	 * If this image is the one being mapped, we have pool name and
5772 	 * id, image name and id, and snap name - need to fill snap id.
5773 	 * Otherwise this is a parent image, identified by pool, image
5774 	 * and snap ids - need to fill in names for those ids.
5775 	 */
5776 	if (!depth)
5777 		ret = rbd_spec_fill_snap_id(rbd_dev);
5778 	else
5779 		ret = rbd_spec_fill_names(rbd_dev);
5780 	if (ret) {
5781 		if (ret == -ENOENT)
5782 			pr_info("snap %s/%s%s%s@%s does not exist\n",
5783 				rbd_dev->spec->pool_name,
5784 				rbd_dev->spec->pool_ns ?: "",
5785 				rbd_dev->spec->pool_ns ? "/" : "",
5786 				rbd_dev->spec->image_name,
5787 				rbd_dev->spec->snap_name);
5788 		goto err_out_probe;
5789 	}
5790 
5791 	if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
5792 		ret = rbd_dev_v2_parent_info(rbd_dev);
5793 		if (ret)
5794 			goto err_out_probe;
5795 
5796 		/*
5797 		 * Need to warn users if this image is the one being
5798 		 * mapped and has a parent.
5799 		 */
5800 		if (!depth && rbd_dev->parent_spec)
5801 			rbd_warn(rbd_dev,
5802 				 "WARNING: kernel layering is EXPERIMENTAL!");
5803 	}
5804 
5805 	ret = rbd_dev_probe_parent(rbd_dev, depth);
5806 	if (ret)
5807 		goto err_out_probe;
5808 
5809 	dout("discovered format %u image, header name is %s\n",
5810 		rbd_dev->image_format, rbd_dev->header_oid.name);
5811 	return 0;
5812 
5813 err_out_probe:
5814 	rbd_dev_unprobe(rbd_dev);
5815 err_out_watch:
5816 	if (!depth)
5817 		rbd_unregister_watch(rbd_dev);
5818 err_out_format:
5819 	rbd_dev->image_format = 0;
5820 	kfree(rbd_dev->spec->image_id);
5821 	rbd_dev->spec->image_id = NULL;
5822 	return ret;
5823 }
5824 
5825 static ssize_t do_rbd_add(struct bus_type *bus,
5826 			  const char *buf,
5827 			  size_t count)
5828 {
5829 	struct rbd_device *rbd_dev = NULL;
5830 	struct ceph_options *ceph_opts = NULL;
5831 	struct rbd_options *rbd_opts = NULL;
5832 	struct rbd_spec *spec = NULL;
5833 	struct rbd_client *rbdc;
5834 	int rc;
5835 
5836 	if (!try_module_get(THIS_MODULE))
5837 		return -ENODEV;
5838 
5839 	/* parse add command */
5840 	rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
5841 	if (rc < 0)
5842 		goto out;
5843 
5844 	rbdc = rbd_get_client(ceph_opts);
5845 	if (IS_ERR(rbdc)) {
5846 		rc = PTR_ERR(rbdc);
5847 		goto err_out_args;
5848 	}
5849 
5850 	/* pick the pool */
5851 	rc = ceph_pg_poolid_by_name(rbdc->client->osdc.osdmap, spec->pool_name);
5852 	if (rc < 0) {
5853 		if (rc == -ENOENT)
5854 			pr_info("pool %s does not exist\n", spec->pool_name);
5855 		goto err_out_client;
5856 	}
5857 	spec->pool_id = (u64)rc;
5858 
5859 	rbd_dev = rbd_dev_create(rbdc, spec, rbd_opts);
5860 	if (!rbd_dev) {
5861 		rc = -ENOMEM;
5862 		goto err_out_client;
5863 	}
5864 	rbdc = NULL;		/* rbd_dev now owns this */
5865 	spec = NULL;		/* rbd_dev now owns this */
5866 	rbd_opts = NULL;	/* rbd_dev now owns this */
5867 
5868 	rbd_dev->config_info = kstrdup(buf, GFP_KERNEL);
5869 	if (!rbd_dev->config_info) {
5870 		rc = -ENOMEM;
5871 		goto err_out_rbd_dev;
5872 	}
5873 
5874 	down_write(&rbd_dev->header_rwsem);
5875 	rc = rbd_dev_image_probe(rbd_dev, 0);
5876 	if (rc < 0) {
5877 		up_write(&rbd_dev->header_rwsem);
5878 		goto err_out_rbd_dev;
5879 	}
5880 
5881 	/* If we are mapping a snapshot it must be marked read-only */
5882 	if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
5883 		rbd_dev->opts->read_only = true;
5884 
5885 	rc = rbd_dev_device_setup(rbd_dev);
5886 	if (rc)
5887 		goto err_out_image_probe;
5888 
5889 	if (rbd_dev->opts->exclusive) {
5890 		rc = rbd_add_acquire_lock(rbd_dev);
5891 		if (rc)
5892 			goto err_out_device_setup;
5893 	}
5894 
5895 	/* Everything's ready.  Announce the disk to the world. */
5896 
5897 	rc = device_add(&rbd_dev->dev);
5898 	if (rc)
5899 		goto err_out_image_lock;
5900 
5901 	add_disk(rbd_dev->disk);
5902 	/* see rbd_init_disk() */
5903 	blk_put_queue(rbd_dev->disk->queue);
5904 
5905 	spin_lock(&rbd_dev_list_lock);
5906 	list_add_tail(&rbd_dev->node, &rbd_dev_list);
5907 	spin_unlock(&rbd_dev_list_lock);
5908 
5909 	pr_info("%s: capacity %llu features 0x%llx\n", rbd_dev->disk->disk_name,
5910 		(unsigned long long)get_capacity(rbd_dev->disk) << SECTOR_SHIFT,
5911 		rbd_dev->header.features);
5912 	rc = count;
5913 out:
5914 	module_put(THIS_MODULE);
5915 	return rc;
5916 
5917 err_out_image_lock:
5918 	rbd_dev_image_unlock(rbd_dev);
5919 err_out_device_setup:
5920 	rbd_dev_device_release(rbd_dev);
5921 err_out_image_probe:
5922 	rbd_dev_image_release(rbd_dev);
5923 err_out_rbd_dev:
5924 	rbd_dev_destroy(rbd_dev);
5925 err_out_client:
5926 	rbd_put_client(rbdc);
5927 err_out_args:
5928 	rbd_spec_put(spec);
5929 	kfree(rbd_opts);
5930 	goto out;
5931 }
5932 
5933 static ssize_t rbd_add(struct bus_type *bus,
5934 		       const char *buf,
5935 		       size_t count)
5936 {
5937 	if (single_major)
5938 		return -EINVAL;
5939 
5940 	return do_rbd_add(bus, buf, count);
5941 }
5942 
5943 static ssize_t rbd_add_single_major(struct bus_type *bus,
5944 				    const char *buf,
5945 				    size_t count)
5946 {
5947 	return do_rbd_add(bus, buf, count);
5948 }
5949 
5950 static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
5951 {
5952 	while (rbd_dev->parent) {
5953 		struct rbd_device *first = rbd_dev;
5954 		struct rbd_device *second = first->parent;
5955 		struct rbd_device *third;
5956 
5957 		/*
5958 		 * Follow to the parent with no grandparent and
5959 		 * remove it.
5960 		 */
5961 		while (second && (third = second->parent)) {
5962 			first = second;
5963 			second = third;
5964 		}
5965 		rbd_assert(second);
5966 		rbd_dev_image_release(second);
5967 		rbd_dev_destroy(second);
5968 		first->parent = NULL;
5969 		first->parent_overlap = 0;
5970 
5971 		rbd_assert(first->parent_spec);
5972 		rbd_spec_put(first->parent_spec);
5973 		first->parent_spec = NULL;
5974 	}
5975 }
5976 
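/*
 * Editorial note: the data written to /sys/bus/rbd/remove is parsed
 * below as "<dev-id> [force]", e.g. "0" or "0 force".  With "force",
 * the device is unmapped even while open; in-flight I/O is allowed to
 * complete or fail first and any new I/O is rejected.
 */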
5977 static ssize_t do_rbd_remove(struct bus_type *bus,
5978 			     const char *buf,
5979 			     size_t count)
5980 {
5981 	struct rbd_device *rbd_dev = NULL;
5982 	struct list_head *tmp;
5983 	int dev_id;
5984 	char opt_buf[6];
5985 	bool already = false;
5986 	bool force = false;
5987 	int ret;
5988 
5989 	dev_id = -1;
5990 	opt_buf[0] = '\0';
5991 	sscanf(buf, "%d %5s", &dev_id, opt_buf);
5992 	if (dev_id < 0) {
5993 		pr_err("dev_id out of range\n");
5994 		return -EINVAL;
5995 	}
5996 	if (opt_buf[0] != '\0') {
5997 		if (!strcmp(opt_buf, "force")) {
5998 			force = true;
5999 		} else {
6000 			pr_err("bad remove option at '%s'\n", opt_buf);
6001 			return -EINVAL;
6002 		}
6003 	}
6004 
6005 	ret = -ENOENT;
6006 	spin_lock(&rbd_dev_list_lock);
6007 	list_for_each(tmp, &rbd_dev_list) {
6008 		rbd_dev = list_entry(tmp, struct rbd_device, node);
6009 		if (rbd_dev->dev_id == dev_id) {
6010 			ret = 0;
6011 			break;
6012 		}
6013 	}
6014 	if (!ret) {
6015 		spin_lock_irq(&rbd_dev->lock);
6016 		if (rbd_dev->open_count && !force)
6017 			ret = -EBUSY;
6018 		else
6019 			already = test_and_set_bit(RBD_DEV_FLAG_REMOVING,
6020 							&rbd_dev->flags);
6021 		spin_unlock_irq(&rbd_dev->lock);
6022 	}
6023 	spin_unlock(&rbd_dev_list_lock);
6024 	if (ret < 0 || already)
6025 		return ret;
6026 
6027 	if (force) {
6028 		/*
6029 		 * Prevent new IO from being queued and wait for existing
6030 		 * IO to complete/fail.
6031 		 */
6032 		blk_mq_freeze_queue(rbd_dev->disk->queue);
6033 		blk_set_queue_dying(rbd_dev->disk->queue);
6034 	}
6035 
6036 	del_gendisk(rbd_dev->disk);
6037 	spin_lock(&rbd_dev_list_lock);
6038 	list_del_init(&rbd_dev->node);
6039 	spin_unlock(&rbd_dev_list_lock);
6040 	device_del(&rbd_dev->dev);
6041 
6042 	rbd_dev_image_unlock(rbd_dev);
6043 	rbd_dev_device_release(rbd_dev);
6044 	rbd_dev_image_release(rbd_dev);
6045 	rbd_dev_destroy(rbd_dev);
6046 	return count;
6047 }
6048 
6049 static ssize_t rbd_remove(struct bus_type *bus,
6050 			  const char *buf,
6051 			  size_t count)
6052 {
6053 	if (single_major)
6054 		return -EINVAL;
6055 
6056 	return do_rbd_remove(bus, buf, count);
6057 }
6058 
6059 static ssize_t rbd_remove_single_major(struct bus_type *bus,
6060 				       const char *buf,
6061 				       size_t count)
6062 {
6063 	return do_rbd_remove(bus, buf, count);
6064 }
6065 
6066 /*
6067  * create control files in sysfs
6068  * /sys/bus/rbd/...
6069  */
6070 static int rbd_sysfs_init(void)
6071 {
6072 	int ret;
6073 
6074 	ret = device_register(&rbd_root_dev);
6075 	if (ret < 0)
6076 		return ret;
6077 
6078 	ret = bus_register(&rbd_bus_type);
6079 	if (ret < 0)
6080 		device_unregister(&rbd_root_dev);
6081 
6082 	return ret;
6083 }
6084 
6085 static void rbd_sysfs_cleanup(void)
6086 {
6087 	bus_unregister(&rbd_bus_type);
6088 	device_unregister(&rbd_root_dev);
6089 }
6090 
6091 static int rbd_slab_init(void)
6092 {
6093 	rbd_assert(!rbd_img_request_cache);
6094 	rbd_img_request_cache = KMEM_CACHE(rbd_img_request, 0);
6095 	if (!rbd_img_request_cache)
6096 		return -ENOMEM;
6097 
6098 	rbd_assert(!rbd_obj_request_cache);
6099 	rbd_obj_request_cache = KMEM_CACHE(rbd_obj_request, 0);
6100 	if (!rbd_obj_request_cache)
6101 		goto out_err;
6102 
6103 	return 0;
6104 
6105 out_err:
6106 	kmem_cache_destroy(rbd_img_request_cache);
6107 	rbd_img_request_cache = NULL;
6108 	return -ENOMEM;
6109 }
6110 
6111 static void rbd_slab_exit(void)
6112 {
6113 	rbd_assert(rbd_obj_request_cache);
6114 	kmem_cache_destroy(rbd_obj_request_cache);
6115 	rbd_obj_request_cache = NULL;
6116 
6117 	rbd_assert(rbd_img_request_cache);
6118 	kmem_cache_destroy(rbd_img_request_cache);
6119 	rbd_img_request_cache = NULL;
6120 }
6121 
6122 static int __init rbd_init(void)
6123 {
6124 	int rc;
6125 
6126 	if (!libceph_compatible(NULL)) {
6127 		rbd_warn(NULL, "libceph incompatibility (quitting)");
6128 		return -EINVAL;
6129 	}
6130 
6131 	rc = rbd_slab_init();
6132 	if (rc)
6133 		return rc;
6134 
6135 	/*
6136 	 * The number of active work items is limited by the number of
6137 	 * rbd devices * queue depth, so leave @max_active at default.
6138 	 */
6139 	rbd_wq = alloc_workqueue(RBD_DRV_NAME, WQ_MEM_RECLAIM, 0);
6140 	if (!rbd_wq) {
6141 		rc = -ENOMEM;
6142 		goto err_out_slab;
6143 	}
6144 
6145 	if (single_major) {
6146 		rbd_major = register_blkdev(0, RBD_DRV_NAME);
6147 		if (rbd_major < 0) {
6148 			rc = rbd_major;
6149 			goto err_out_wq;
6150 		}
6151 	}
6152 
6153 	rc = rbd_sysfs_init();
6154 	if (rc)
6155 		goto err_out_blkdev;
6156 
6157 	if (single_major)
6158 		pr_info("loaded (major %d)\n", rbd_major);
6159 	else
6160 		pr_info("loaded\n");
6161 
6162 	return 0;
6163 
6164 err_out_blkdev:
6165 	if (single_major)
6166 		unregister_blkdev(rbd_major, RBD_DRV_NAME);
6167 err_out_wq:
6168 	destroy_workqueue(rbd_wq);
6169 err_out_slab:
6170 	rbd_slab_exit();
6171 	return rc;
6172 }
6173 
6174 static void __exit rbd_exit(void)
6175 {
6176 	ida_destroy(&rbd_dev_id_ida);
6177 	rbd_sysfs_cleanup();
6178 	if (single_major)
6179 		unregister_blkdev(rbd_major, RBD_DRV_NAME);
6180 	destroy_workqueue(rbd_wq);
6181 	rbd_slab_exit();
6182 }
6183 
6184 module_init(rbd_init);
6185 module_exit(rbd_exit);
6186 
6187 MODULE_AUTHOR("Alex Elder <elder@inktank.com>");
6188 MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
6189 MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
6190 /* following authorship retained from original osdblk.c */
6191 MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");
6192 
6193 MODULE_DESCRIPTION("RADOS Block Device (RBD) driver");
6194 MODULE_LICENSE("GPL");
6195