/*
   rbd.c -- Export ceph rados objects as a Linux block device

   based on drivers/block/osdblk.c:

   Copyright 2009 Red Hat, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

   For usage instructions, please refer to:

		 Documentation/ABI/testing/sysfs-bus-rbd

 */
#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/cls_lock_client.h>
#include <linux/ceph/decode.h>
#include <linux/parser.h>
#include <linux/bsearch.h>

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/blk-mq.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/workqueue.h>

#include "rbd_types.h"
#define RBD_DEBUG	/* Activate rbd_assert() calls */
/*
 * The basic unit of block I/O is a sector.  It is interpreted in a
 * number of contexts in Linux (blk, bio, genhd), but the default is
 * universally 512 bytes.  These symbols are just slightly more
 * meaningful than the bare numbers they represent.
 */
#define	SECTOR_SHIFT	9
#define	SECTOR_SIZE	(1ULL << SECTOR_SHIFT)
/*
 * Increment the given counter and return its updated value.
 * If the counter is already 0 it will not be incremented.
 * If the counter is already at its maximum value returns
 * -EINVAL without updating it.
 */
static int atomic_inc_return_safe(atomic_t *v)
{
	unsigned int counter;

	counter = (unsigned int)__atomic_add_unless(v, 1, 0);
	if (counter <= (unsigned int)INT_MAX)
		return (int)counter;

	atomic_dec(v);

	return -EINVAL;
}
/* Decrement the counter.  Return the resulting value, or -EINVAL */
static int atomic_dec_return_safe(atomic_t *v)
{
	int counter;

	counter = atomic_dec_return(v);
	if (counter >= 0)
		return counter;

	atomic_inc(v);

	return -EINVAL;
}
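
/*
 * A minimal usage sketch of the saturating counter pair above; the
 * names below are hypothetical and not part of this driver.  A
 * return value > 0 from atomic_inc_return_safe() means a reference
 * was taken (0 means the counter had already dropped to zero), and
 * the safe decrement reports underflow instead of wrapping.
 */
#if 0	/* example only, not built */
static atomic_t example_ref = ATOMIC_INIT(1);

static bool example_get(void)
{
	return atomic_inc_return_safe(&example_ref) > 0;
}

static void example_put(void)
{
	if (atomic_dec_return_safe(&example_ref) < 0)
		pr_warn("example_ref underflow\n");
}
#endif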
#define RBD_DRV_NAME "rbd"

#define RBD_MINORS_PER_MAJOR		256
#define RBD_SINGLE_MAJOR_PART_SHIFT	4

#define RBD_MAX_PARENT_CHAIN_LEN	16

#define RBD_SNAP_DEV_NAME_PREFIX	"snap_"
#define RBD_MAX_SNAP_NAME_LEN	\
			(NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))

#define RBD_MAX_SNAP_COUNT	510	/* allows max snapc to fit in 4KB */

#define RBD_SNAP_HEAD_NAME	"-"

#define	BAD_SNAP_INDEX	U32_MAX		/* invalid index into snap array */

/* This allows a single page to hold an image name sent by OSD */
#define RBD_IMAGE_NAME_LEN_MAX	(PAGE_SIZE - sizeof (__le32) - 1)
#define RBD_IMAGE_ID_LEN_MAX	64

#define RBD_OBJ_PREFIX_LEN_MAX	64

#define RBD_NOTIFY_TIMEOUT	5	/* seconds */
#define RBD_RETRY_DELAY		msecs_to_jiffies(1000)
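
/*
 * Worked arithmetic for RBD_MAX_SNAP_COUNT above: each snapshot id in
 * a snapshot context is a __le64, so 510 ids occupy 510 * 8 = 4080
 * bytes, which together with the struct ceph_snap_context header
 * (roughly 16 bytes; the exact size is an implementation detail)
 * stays within a single 4KB allocation.
 */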
#define RBD_FEATURE_LAYERING		(1<<0)
#define RBD_FEATURE_STRIPINGV2		(1<<1)
#define RBD_FEATURE_EXCLUSIVE_LOCK	(1<<2)
#define RBD_FEATURES_ALL		(RBD_FEATURE_LAYERING |		\
					 RBD_FEATURE_STRIPINGV2 |	\
					 RBD_FEATURE_EXCLUSIVE_LOCK)

/* Features supported by this (client software) implementation. */

#define RBD_FEATURES_SUPPORTED	(RBD_FEATURES_ALL)
/*
 * An RBD device name will be "rbd#", where the "rbd" comes from
 * RBD_DRV_NAME above, and # is a unique integer identifier.
 */
#define DEV_NAME_LEN		32
/*
 * block device image metadata (in-memory version)
 */
struct rbd_image_header {
	/* These six fields never change for a given rbd image */
	char *object_prefix;
	__u8 obj_order;
	__u8 crypt_type;
	__u8 comp_type;
	u64 stripe_unit;
	u64 stripe_count;
	u64 features;		/* Might be changeable someday? */

	/* The remaining fields need to be updated occasionally */
	u64 image_size;
	struct ceph_snap_context *snapc;
	char *snap_names;	/* format 1 only */
	u64 *snap_sizes;	/* format 1 only */
};
/*
 * An rbd image specification.
 *
 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
 * identify an image.  Each rbd_dev structure includes a pointer to
 * an rbd_spec structure that encapsulates this identity.
 *
 * Each of the id's in an rbd_spec has an associated name.  For a
 * user-mapped image, the names are supplied and the id's associated
 * with them are looked up.  For a layered image, a parent image is
 * defined by the tuple, and the names are looked up.
 *
 * An rbd_dev structure contains a parent_spec pointer which is
 * non-null if the image it represents is a child in a layered
 * image.  This pointer will refer to the rbd_spec structure used
 * by the parent rbd_dev for its own identity (i.e., the structure
 * is shared between the parent and child).
 *
 * Since these structures are populated once, during the discovery
 * phase of image construction, they are effectively immutable so
 * we make no effort to synchronize access to them.
 *
 * Note that code herein does not assume the image name is known (it
 * could be a null pointer).
 */
struct rbd_spec {
	u64		pool_id;
	const char	*pool_name;

	const char	*image_id;
	const char	*image_name;

	u64		snap_id;
	const char	*snap_name;

	struct kref	kref;
};
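
/*
 * Example with hypothetical values: mapping image "foo" at snapshot
 * "snap1" in pool "rbd" might produce the spec { pool_id = 2,
 * pool_name = "rbd", image_id = "1035fb8b4567", image_name = "foo",
 * snap_id = 4, snap_name = "snap1" }.  Only the (pool_id, image_id,
 * snap_id) tuple is needed to identify the image.
 */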
/*
 * an instance of the client.  multiple devices may share an rbd client.
 */
struct rbd_client {
	struct ceph_client	*client;
	struct kref		kref;
	struct list_head	node;
};
struct rbd_img_request;
typedef void (*rbd_img_callback_t)(struct rbd_img_request *);

#define	BAD_WHICH	U32_MAX		/* Good which or bad which, which? */

struct rbd_obj_request;
typedef void (*rbd_obj_callback_t)(struct rbd_obj_request *);

enum obj_request_type {
	OBJ_REQUEST_NODATA, OBJ_REQUEST_BIO, OBJ_REQUEST_PAGES
};

enum obj_operation_type {
	OBJ_OP_WRITE,
	OBJ_OP_READ,
	OBJ_OP_DISCARD,
};

enum obj_req_flags {
	OBJ_REQ_DONE,		/* completion flag: not done = 0, done = 1 */
	OBJ_REQ_IMG_DATA,	/* object usage: standalone = 0, image = 1 */
	OBJ_REQ_KNOWN,		/* EXISTS flag valid: no = 0, yes = 1 */
	OBJ_REQ_EXISTS,		/* target exists: no = 0, yes = 1 */
};
struct rbd_obj_request {
	const char		*object_name;
	u64			offset;		/* object start byte */
	u64			length;		/* bytes from offset */
	unsigned long		flags;

	/*
	 * An object request associated with an image will have its
	 * img_data flag set; a standalone object request will not.
	 *
	 * A standalone object request will have which == BAD_WHICH
	 * and a null obj_request pointer.
	 *
	 * An object request initiated in support of a layered image
	 * object (to check for its existence before a write) will
	 * have which == BAD_WHICH and a non-null obj_request pointer.
	 *
	 * Finally, an object request for rbd image data will have
	 * which != BAD_WHICH, and will have a non-null img_request
	 * pointer.  The value of which will be in the range
	 * 0..(img_request->obj_request_count-1).
	 */
	union {
		struct rbd_obj_request	*obj_request;	/* STAT op */
		struct {
			struct rbd_img_request	*img_request;
			u64			img_offset;
			/* links for img_request->obj_requests list */
			struct list_head	links;
		};
	};
	u32			which;		/* posn image request list */

	enum obj_request_type	type;
	union {
		struct bio	*bio_list;
		struct {
			struct page	**pages;
			u32		page_count;
		};
	};
	struct page		**copyup_pages;
	u32			copyup_page_count;

	struct ceph_osd_request	*osd_req;

	u64			xferred;	/* bytes transferred */
	int			result;

	rbd_obj_callback_t	callback;
	struct completion	completion;

	struct kref		kref;
};
enum img_req_flags {
	IMG_REQ_WRITE,		/* I/O direction: read = 0, write = 1 */
	IMG_REQ_CHILD,		/* initiator: block = 0, child image = 1 */
	IMG_REQ_LAYERED,	/* ENOENT handling: normal = 0, layered = 1 */
	IMG_REQ_DISCARD,	/* discard: normal = 0, discard request = 1 */
};
struct rbd_img_request {
	struct rbd_device	*rbd_dev;
	u64			offset;	/* starting image byte offset */
	u64			length;	/* byte count from offset */
	unsigned long		flags;
	union {
		u64			snap_id;	/* for reads */
		struct ceph_snap_context *snapc;	/* for writes */
	};
	union {
		struct request		*rq;		/* block request */
		struct rbd_obj_request	*obj_request;	/* obj req initiator */
	};
	struct page		**copyup_pages;
	u32			copyup_page_count;
	spinlock_t		completion_lock;/* protects next_completion */
	u32			next_completion;
	rbd_img_callback_t	callback;
	u64			xferred;/* aggregate bytes transferred */
	int			result;	/* first nonzero obj_request result */

	u32			obj_request_count;
	struct list_head	obj_requests;	/* rbd_obj_request structs */

	struct kref		kref;
};
#define for_each_obj_request(ireq, oreq) \
	list_for_each_entry(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_from(ireq, oreq) \
	list_for_each_entry_from(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_safe(ireq, oreq, n) \
	list_for_each_entry_safe_reverse(oreq, n, &(ireq)->obj_requests, links)
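
/*
 * A minimal iteration sketch (hypothetical helper, not part of this
 * driver): walk an image request's object requests in list order,
 * the way rbd_img_request_complete() below sums bytes transferred.
 */
#if 0	/* example only, not built */
static u64 example_total_xferred(struct rbd_img_request *img_request)
{
	struct rbd_obj_request *obj_request;
	u64 total = 0;

	for_each_obj_request(img_request, obj_request)
		total += obj_request->xferred;

	return total;
}
#endif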
enum rbd_watch_state {
	RBD_WATCH_STATE_UNREGISTERED,
	RBD_WATCH_STATE_REGISTERED,
	RBD_WATCH_STATE_ERROR,
};

enum rbd_lock_state {
	RBD_LOCK_STATE_UNLOCKED,
	RBD_LOCK_STATE_LOCKED,
	RBD_LOCK_STATE_RELEASING,
};

/* WatchNotify::ClientId */
struct rbd_client_id {
	u64 gid;
	u64 handle;
};
struct rbd_mapping {
	u64                     size;
	u64                     features;
	bool			read_only;
};

/*
 * a single device
 */
struct rbd_device {
	int			dev_id;		/* blkdev unique id */

	int			major;		/* blkdev assigned major */
	int			minor;
	struct gendisk		*disk;		/* blkdev's gendisk and rq */

	u32			image_format;	/* Either 1 or 2 */
	struct rbd_client	*rbd_client;

	char			name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */

	spinlock_t		lock;		/* queue, flags, open_count */

	struct rbd_image_header	header;
	unsigned long		flags;		/* possibly lock protected */
	struct rbd_spec		*spec;
	struct rbd_options	*opts;
	char			*config_info;	/* add{,_single_major} string */

	struct ceph_object_id	header_oid;
	struct ceph_object_locator header_oloc;

	struct ceph_file_layout	layout;		/* used for all rbd requests */

	struct mutex		watch_mutex;
	enum rbd_watch_state	watch_state;
	struct ceph_osd_linger_request *watch_handle;
	u64			watch_cookie;
	struct delayed_work	watch_dwork;

	struct rw_semaphore	lock_rwsem;
	enum rbd_lock_state	lock_state;
	struct rbd_client_id	owner_cid;
	struct work_struct	acquired_lock_work;
	struct work_struct	released_lock_work;
	struct delayed_work	lock_dwork;
	struct work_struct	unlock_work;
	wait_queue_head_t	lock_waitq;

	struct workqueue_struct	*task_wq;

	struct rbd_spec		*parent_spec;
	u64			parent_overlap;
	atomic_t		parent_ref;
	struct rbd_device	*parent;

	/* Block layer tags. */
	struct blk_mq_tag_set	tag_set;

	/* protects updating the header */
	struct rw_semaphore     header_rwsem;

	struct rbd_mapping	mapping;

	struct list_head	node;

	/* sysfs related */
	struct device		dev;
	unsigned long		open_count;	/* protected by lock */
};
/*
 * Flag bits for rbd_dev->flags.  If atomicity is required,
 * rbd_dev->lock is used to protect access.
 *
 * Currently, only the "removing" flag (which is coupled with the
 * "open_count" field) requires atomic access.
 */
enum rbd_dev_flags {
	RBD_DEV_FLAG_EXISTS,	/* mapped snapshot has not been deleted */
	RBD_DEV_FLAG_REMOVING,	/* this mapping is being removed */
};
static DEFINE_MUTEX(client_mutex);	/* Serialize client creation */

static LIST_HEAD(rbd_dev_list);    /* devices */
static DEFINE_SPINLOCK(rbd_dev_list_lock);

static LIST_HEAD(rbd_client_list);		/* clients */
static DEFINE_SPINLOCK(rbd_client_list_lock);

/* Slab caches for frequently-allocated structures */

static struct kmem_cache	*rbd_img_request_cache;
static struct kmem_cache	*rbd_obj_request_cache;
static struct kmem_cache	*rbd_segment_name_cache;

static int rbd_major;
static DEFINE_IDA(rbd_dev_id_ida);

static struct workqueue_struct *rbd_wq;
/*
 * Default to false for now, as single-major requires >= 0.75 version of
 * userspace rbd utility.
 */
static bool single_major = false;
module_param(single_major, bool, S_IRUGO);
MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: false)");
static int rbd_img_request_submit(struct rbd_img_request *img_request);

static ssize_t rbd_add(struct bus_type *bus, const char *buf,
		       size_t count);
static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
			  size_t count);
static ssize_t rbd_add_single_major(struct bus_type *bus, const char *buf,
				    size_t count);
static ssize_t rbd_remove_single_major(struct bus_type *bus, const char *buf,
				       size_t count);
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth);
static void rbd_spec_put(struct rbd_spec *spec);
static int rbd_dev_id_to_minor(int dev_id)
{
	return dev_id << RBD_SINGLE_MAJOR_PART_SHIFT;
}

static int minor_to_rbd_dev_id(int minor)
{
	return minor >> RBD_SINGLE_MAJOR_PART_SHIFT;
}
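
/*
 * Worked example (assuming the shift of 4 above): dev_id 3 maps to
 * minor 3 << 4 == 48, reserving minors 48..63 for /dev/rbd3 and its
 * partitions; minor_to_rbd_dev_id() recovers 48 >> 4 == 3 from any
 * of those minors.
 */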
static bool rbd_is_lock_supported(struct rbd_device *rbd_dev)
{
	return (rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK) &&
	       rbd_dev->spec->snap_id == CEPH_NOSNAP &&
	       !rbd_dev->mapping.read_only;
}

static bool __rbd_is_lock_owner(struct rbd_device *rbd_dev)
{
	return rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED ||
	       rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING;
}

static bool rbd_is_lock_owner(struct rbd_device *rbd_dev)
{
	bool is_lock_owner;

	down_read(&rbd_dev->lock_rwsem);
	is_lock_owner = __rbd_is_lock_owner(rbd_dev);
	up_read(&rbd_dev->lock_rwsem);
	return is_lock_owner;
}
static BUS_ATTR(add, S_IWUSR, NULL, rbd_add);
static BUS_ATTR(remove, S_IWUSR, NULL, rbd_remove);
static BUS_ATTR(add_single_major, S_IWUSR, NULL, rbd_add_single_major);
static BUS_ATTR(remove_single_major, S_IWUSR, NULL, rbd_remove_single_major);

static struct attribute *rbd_bus_attrs[] = {
	&bus_attr_add.attr,
	&bus_attr_remove.attr,
	&bus_attr_add_single_major.attr,
	&bus_attr_remove_single_major.attr,
	NULL,
};

static umode_t rbd_bus_is_visible(struct kobject *kobj,
				  struct attribute *attr, int index)
{
	if (!single_major &&
	    (attr == &bus_attr_add_single_major.attr ||
	     attr == &bus_attr_remove_single_major.attr))
		return 0;

	return attr->mode;
}

static const struct attribute_group rbd_bus_group = {
	.attrs = rbd_bus_attrs,
	.is_visible = rbd_bus_is_visible,
};
__ATTRIBUTE_GROUPS(rbd_bus);

static struct bus_type rbd_bus_type = {
	.name		= "rbd",
	.bus_groups	= rbd_bus_groups,
};

static void rbd_root_dev_release(struct device *dev)
{
}

static struct device rbd_root_dev = {
	.init_name =    "rbd",
	.release =      rbd_root_dev_release,
};
static __printf(2, 3)
void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	if (!rbd_dev)
		printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
	else if (rbd_dev->disk)
		printk(KERN_WARNING "%s: %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_name)
		printk(KERN_WARNING "%s: image %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_id)
		printk(KERN_WARNING "%s: id %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
	else	/* punt if we have no useful name at all */
		printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
			RBD_DRV_NAME, rbd_dev, &vaf);

	va_end(args);
}
#ifdef RBD_DEBUG
#define rbd_assert(expr)						\
		if (unlikely(!(expr))) {				\
			printk(KERN_ERR "\nAssertion failure in %s() "	\
						"at line %d:\n\n"	\
					"\trbd_assert(%s);\n\n",	\
					__func__, __LINE__, #expr);	\
			BUG();						\
		}
#else /* !RBD_DEBUG */
#  define rbd_assert(expr)	((void) 0)
#endif /* !RBD_DEBUG */
static void rbd_osd_copyup_callback(struct rbd_obj_request *obj_request);
static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);

static int rbd_dev_refresh(struct rbd_device *rbd_dev);
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
static int rbd_dev_header_info(struct rbd_device *rbd_dev);
static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev);
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id);
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
		u8 *order, u64 *snap_size);
static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
		u64 *snap_features);
static int rbd_open(struct block_device *bdev, fmode_t mode)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	bool removing = false;

	if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)
		return -EROFS;

	spin_lock_irq(&rbd_dev->lock);
	if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
		removing = true;
	else
		rbd_dev->open_count++;
	spin_unlock_irq(&rbd_dev->lock);
	if (removing)
		return -ENOENT;

	(void) get_device(&rbd_dev->dev);

	return 0;
}
static void rbd_release(struct gendisk *disk, fmode_t mode)
{
	struct rbd_device *rbd_dev = disk->private_data;
	unsigned long open_count_before;

	spin_lock_irq(&rbd_dev->lock);
	open_count_before = rbd_dev->open_count--;
	spin_unlock_irq(&rbd_dev->lock);
	rbd_assert(open_count_before > 0);

	put_device(&rbd_dev->dev);
}
static int rbd_ioctl_set_ro(struct rbd_device *rbd_dev, unsigned long arg)
{
	int ret = 0;
	int val;
	bool ro;
	bool ro_changed = false;

	/* get_user() may sleep, so call it before taking rbd_dev->lock */
	if (get_user(val, (int __user *)(arg)))
		return -EFAULT;

	ro = val ? true : false;
	/* Snapshots can't be written to */
	if (rbd_dev->spec->snap_id != CEPH_NOSNAP && !ro)
		return -EROFS;

	spin_lock_irq(&rbd_dev->lock);
	/* prevent others from opening this device */
	if (rbd_dev->open_count > 1) {
		ret = -EBUSY;
		goto out;
	}
	if (rbd_dev->mapping.read_only != ro) {
		rbd_dev->mapping.read_only = ro;
		ro_changed = true;
	}
out:
	spin_unlock_irq(&rbd_dev->lock);
	/* set_disk_ro() may sleep, so call it after releasing rbd_dev->lock */
	if (ret == 0 && ro_changed)
		set_disk_ro(rbd_dev->disk, ro ? 1 : 0);

	return ret;
}
static int rbd_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	int ret = 0;

	switch (cmd) {
	case BLKROSET:
		ret = rbd_ioctl_set_ro(rbd_dev, arg);
		break;
	default:
		ret = -ENOTTY;
	}

	return ret;
}

#ifdef CONFIG_COMPAT
static int rbd_compat_ioctl(struct block_device *bdev, fmode_t mode,
				unsigned int cmd, unsigned long arg)
{
	return rbd_ioctl(bdev, mode, cmd, arg);
}
#endif /* CONFIG_COMPAT */
static const struct block_device_operations rbd_bd_ops = {
	.owner			= THIS_MODULE,
	.open			= rbd_open,
	.release		= rbd_release,
	.ioctl			= rbd_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= rbd_compat_ioctl,
#endif
};
/*
 * Initialize an rbd client instance.  Success or not, this function
 * consumes ceph_opts.  Caller holds client_mutex.
 */
static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;
	int ret = -ENOMEM;

	dout("%s:\n", __func__);
	rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
	if (!rbdc)
		goto out_opt;

	kref_init(&rbdc->kref);
	INIT_LIST_HEAD(&rbdc->node);

	rbdc->client = ceph_create_client(ceph_opts, rbdc, 0, 0);
	if (IS_ERR(rbdc->client))
		goto out_rbdc;
	ceph_opts = NULL; /* Now rbdc->client is responsible for ceph_opts */

	ret = ceph_open_session(rbdc->client);
	if (ret < 0)
		goto out_client;

	spin_lock(&rbd_client_list_lock);
	list_add_tail(&rbdc->node, &rbd_client_list);
	spin_unlock(&rbd_client_list_lock);

	dout("%s: rbdc %p\n", __func__, rbdc);

	return rbdc;
out_client:
	ceph_destroy_client(rbdc->client);
out_rbdc:
	kfree(rbdc);
out_opt:
	if (ceph_opts)
		ceph_destroy_options(ceph_opts);
	dout("%s: error %d\n", __func__, ret);

	return ERR_PTR(ret);
}
static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
{
	kref_get(&rbdc->kref);

	return rbdc;
}
/*
 * Find a ceph client with specific addr and configuration.  If
 * found, bump its reference count.
 */
static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
{
	struct rbd_client *client_node;
	bool found = false;

	if (ceph_opts->flags & CEPH_OPT_NOSHARE)
		return NULL;

	spin_lock(&rbd_client_list_lock);
	list_for_each_entry(client_node, &rbd_client_list, node) {
		if (!ceph_compare_options(ceph_opts, client_node->client)) {
			__rbd_get_client(client_node);

			found = true;
			break;
		}
	}
	spin_unlock(&rbd_client_list_lock);

	return found ? client_node : NULL;
}
/*
 * (Per device) rbd map options
 */
enum {
	Opt_queue_depth,
	Opt_last_int,
	/* int args above */
	Opt_last_string,
	/* string args above */
	Opt_read_only,
	Opt_read_write,
	Opt_lock_on_read,
	Opt_err
};

static match_table_t rbd_opts_tokens = {
	{Opt_queue_depth, "queue_depth=%d"},
	/* int args above */
	/* string args above */
	{Opt_read_only, "read_only"},
	{Opt_read_only, "ro"},		/* Alternate spelling */
	{Opt_read_write, "read_write"},
	{Opt_read_write, "rw"},		/* Alternate spelling */
	{Opt_lock_on_read, "lock_on_read"},
	{Opt_err, NULL}
};

struct rbd_options {
	int	queue_depth;
	bool	read_only;
	bool	lock_on_read;
};
#define RBD_QUEUE_DEPTH_DEFAULT	BLKDEV_MAX_RQ
#define RBD_READ_ONLY_DEFAULT	false
#define RBD_LOCK_ON_READ_DEFAULT false
static int parse_rbd_opts_token(char *c, void *private)
{
	struct rbd_options *rbd_opts = private;
	substring_t argstr[MAX_OPT_ARGS];
	int token, intval, ret;

	token = match_token(c, rbd_opts_tokens, argstr);
	if (token < Opt_last_int) {
		ret = match_int(&argstr[0], &intval);
		if (ret < 0) {
			pr_err("bad mount option arg (not int) at '%s'\n", c);
			return ret;
		}
		dout("got int token %d val %d\n", token, intval);
	} else if (token > Opt_last_int && token < Opt_last_string) {
		dout("got string token %d val %s\n", token, argstr[0].from);
	} else {
		dout("got token %d\n", token);
	}

	switch (token) {
	case Opt_queue_depth:
		if (intval < 1) {
			pr_err("queue_depth out of range\n");
			return -EINVAL;
		}
		rbd_opts->queue_depth = intval;
		break;
	case Opt_read_only:
		rbd_opts->read_only = true;
		break;
	case Opt_read_write:
		rbd_opts->read_only = false;
		break;
	case Opt_lock_on_read:
		rbd_opts->lock_on_read = true;
		break;
	default:
		/* libceph prints "bad option" msg */
		return -EINVAL;
	}

	return 0;
}
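
/*
 * Example (a sketch based on Documentation/ABI/testing/sysfs-bus-rbd;
 * the monitor address and names below are hypothetical):
 *
 *   echo "1.2.3.4:6789 name=admin,queue_depth=256,lock_on_read \
 *         rbd foo -" > /sys/bus/rbd/add
 *
 * libceph consumes the options it recognizes (e.g. name=admin), and
 * the per-device tokens end up here, setting rbd_opts->queue_depth
 * and rbd_opts->lock_on_read.
 */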
static char* obj_op_name(enum obj_operation_type op_type)
{
	switch (op_type) {
	case OBJ_OP_READ:
		return "read";
	case OBJ_OP_WRITE:
		return "write";
	case OBJ_OP_DISCARD:
		return "discard";
	default:
		return "???";
	}
}
/*
 * Get a ceph client with specific addr and configuration, if one does
 * not exist create it.  Either way, ceph_opts is consumed by this
 * function.
 */
static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;

	mutex_lock_nested(&client_mutex, SINGLE_DEPTH_NESTING);
	rbdc = rbd_client_find(ceph_opts);
	if (rbdc)	/* using an existing client */
		ceph_destroy_options(ceph_opts);
	else
		rbdc = rbd_client_create(ceph_opts);
	mutex_unlock(&client_mutex);

	return rbdc;
}
/*
 * Destroy ceph client.  Takes rbd_client_list_lock to unlink the
 * client from the list.
 */
static void rbd_client_release(struct kref *kref)
{
	struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);

	dout("%s: rbdc %p\n", __func__, rbdc);
	spin_lock(&rbd_client_list_lock);
	list_del(&rbdc->node);
	spin_unlock(&rbd_client_list_lock);

	ceph_destroy_client(rbdc->client);
	kfree(rbdc);
}

/*
 * Drop reference to ceph client node. If it's not referenced anymore, release
 * it.
 */
static void rbd_put_client(struct rbd_client *rbdc)
{
	if (rbdc)
		kref_put(&rbdc->kref, rbd_client_release);
}
static bool rbd_image_format_valid(u32 image_format)
{
	return image_format == 1 || image_format == 2;
}
static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
{
	size_t size;
	u32 snap_count;

	/* The header has to start with the magic rbd header text */
	if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
		return false;

	/* The bio layer requires at least sector-sized I/O */
	if (ondisk->options.order < SECTOR_SHIFT)
		return false;

	/* If we use u64 in a few spots we may be able to loosen this */
	if (ondisk->options.order > 8 * sizeof (int) - 1)
		return false;

	/*
	 * The size of a snapshot header has to fit in a size_t, and
	 * that limits the number of snapshots.
	 */
	snap_count = le32_to_cpu(ondisk->snap_count);
	size = SIZE_MAX - sizeof (struct ceph_snap_context);
	if (snap_count > size / sizeof (__le64))
		return false;

	/*
	 * Not only that, but the size of the entire snapshot
	 * header must also be representable in a size_t.
	 */
	size -= snap_count * sizeof (__le64);
	if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
		return false;

	return true;
}
/*
 * Fill an rbd image header with information from the given format 1
 * on-disk header.
 */
static int rbd_header_from_disk(struct rbd_device *rbd_dev,
				 struct rbd_image_header_ondisk *ondisk)
{
	struct rbd_image_header *header = &rbd_dev->header;
	bool first_time = header->object_prefix == NULL;
	struct ceph_snap_context *snapc;
	char *object_prefix = NULL;
	char *snap_names = NULL;
	u64 *snap_sizes = NULL;
	u32 snap_count;
	size_t size;
	int ret = -ENOMEM;
	u32 i;

	/* Allocate this now to avoid having to handle failure below */

	if (first_time) {
		size_t len;

		len = strnlen(ondisk->object_prefix,
				sizeof (ondisk->object_prefix));
		object_prefix = kmalloc(len + 1, GFP_KERNEL);
		if (!object_prefix)
			return -ENOMEM;
		memcpy(object_prefix, ondisk->object_prefix, len);
		object_prefix[len] = '\0';
	}

	/* Allocate the snapshot context and fill it in */

	snap_count = le32_to_cpu(ondisk->snap_count);
	snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
	if (!snapc)
		goto out_err;
	snapc->seq = le64_to_cpu(ondisk->snap_seq);
	if (snap_count) {
		struct rbd_image_snap_ondisk *snaps;
		u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);

		/* We'll keep a copy of the snapshot names... */

		if (snap_names_len > (u64)SIZE_MAX)
			goto out_2big;
		snap_names = kmalloc(snap_names_len, GFP_KERNEL);
		if (!snap_names)
			goto out_err;

		/* ...as well as the array of their sizes. */

		size = snap_count * sizeof (*header->snap_sizes);
		snap_sizes = kmalloc(size, GFP_KERNEL);
		if (!snap_sizes)
			goto out_err;

		/*
		 * Copy the names, and fill in each snapshot's id
		 * and size.
		 *
		 * Note that rbd_dev_v1_header_info() guarantees the
		 * ondisk buffer we're working with has
		 * snap_names_len bytes beyond the end of the
		 * snapshot id array, this memcpy() is safe.
		 */
		memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
		snaps = ondisk->snaps;
		for (i = 0; i < snap_count; i++) {
			snapc->snaps[i] = le64_to_cpu(snaps[i].id);
			snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
		}
	}

	/* We won't fail any more, fill in the header */

	if (first_time) {
		header->object_prefix = object_prefix;
		header->obj_order = ondisk->options.order;
		header->crypt_type = ondisk->options.crypt_type;
		header->comp_type = ondisk->options.comp_type;
		/* The rest aren't used for format 1 images */
		header->stripe_unit = 0;
		header->stripe_count = 0;
		header->features = 0;
	} else {
		ceph_put_snap_context(header->snapc);
		kfree(header->snap_names);
		kfree(header->snap_sizes);
	}

	/* The remaining fields always get updated (when we refresh) */

	header->image_size = le64_to_cpu(ondisk->image_size);
	header->snapc = snapc;
	header->snap_names = snap_names;
	header->snap_sizes = snap_sizes;

	return 0;
out_2big:
	ret = -EIO;
out_err:
	kfree(snap_sizes);
	kfree(snap_names);
	ceph_put_snap_context(snapc);
	kfree(object_prefix);

	return ret;
}
static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
{
	const char *snap_name;

	rbd_assert(which < rbd_dev->header.snapc->num_snaps);

	/* Skip over names until we find the one we are looking for */

	snap_name = rbd_dev->header.snap_names;
	while (which--)
		snap_name += strlen(snap_name) + 1;

	return kstrdup(snap_name, GFP_KERNEL);
}
/*
 * Snapshot id comparison function for use with qsort()/bsearch().
 * Note that result is for snapshots in *descending* order.
 */
static int snapid_compare_reverse(const void *s1, const void *s2)
{
	u64 snap_id1 = *(u64 *)s1;
	u64 snap_id2 = *(u64 *)s2;

	if (snap_id1 < snap_id2)
		return 1;
	return snap_id1 == snap_id2 ? 0 : -1;
}
/*
 * Search a snapshot context to see if the given snapshot id is
 * present.
 *
 * Returns the position of the snapshot id in the array if it's found,
 * or BAD_SNAP_INDEX otherwise.
 *
 * Note: The snapshot array is kept sorted (by the osd) in
 * reverse order, highest snapshot id first.
 */
static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	u64 *found;

	found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
				sizeof (snap_id), snapid_compare_reverse);

	return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
}
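
/*
 * Worked example (hypothetical snapshot context): with snaps[] kept
 * descending as { 12, 7, 3 }, looking up snap_id 7 lands on index 1,
 * while looking up snap_id 5 finds nothing and yields BAD_SNAP_INDEX.
 */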
static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id)
{
	u32 which;
	const char *snap_name;

	which = rbd_dev_snap_index(rbd_dev, snap_id);
	if (which == BAD_SNAP_INDEX)
		return ERR_PTR(-ENOENT);

	snap_name = _rbd_dev_v1_snap_name(rbd_dev, which);
	return snap_name ? snap_name : ERR_PTR(-ENOMEM);
}
static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
{
	if (snap_id == CEPH_NOSNAP)
		return RBD_SNAP_HEAD_NAME;

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (rbd_dev->image_format == 1)
		return rbd_dev_v1_snap_name(rbd_dev, snap_id);

	return rbd_dev_v2_snap_name(rbd_dev, snap_id);
}
static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u64 *snap_size)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_size = rbd_dev->header.image_size;
	} else if (rbd_dev->image_format == 1) {
		u32 which;

		which = rbd_dev_snap_index(rbd_dev, snap_id);
		if (which == BAD_SNAP_INDEX)
			return -ENOENT;

		*snap_size = rbd_dev->header.snap_sizes[which];
	} else {
		u64 size = 0;
		int ret;

		ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
		if (ret)
			return ret;

		*snap_size = size;
	}
	return 0;
}
static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
			u64 *snap_features)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_features = rbd_dev->header.features;
	} else if (rbd_dev->image_format == 1) {
		*snap_features = 0;	/* No features for format 1 */
	} else {
		u64 features = 0;
		int ret;

		ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, &features);
		if (ret)
			return ret;

		*snap_features = features;
	}
	return 0;
}
static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
{
	u64 snap_id = rbd_dev->spec->snap_id;
	u64 size = 0;
	u64 features = 0;
	int ret;

	ret = rbd_snap_size(rbd_dev, snap_id, &size);
	if (ret)
		return ret;
	ret = rbd_snap_features(rbd_dev, snap_id, &features);
	if (ret)
		return ret;

	rbd_dev->mapping.size = size;
	rbd_dev->mapping.features = features;

	return 0;
}

static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
{
	rbd_dev->mapping.size = 0;
	rbd_dev->mapping.features = 0;
}
static void rbd_segment_name_free(const char *name)
{
	/* The explicit cast here is needed to drop the const qualifier */

	kmem_cache_free(rbd_segment_name_cache, (void *)name);
}
static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
{
	const char *name_format;
	char *name;
	u64 segment;
	int ret;

	name = kmem_cache_alloc(rbd_segment_name_cache, GFP_NOIO);
	if (!name)
		return NULL;
	segment = offset >> rbd_dev->header.obj_order;
	name_format = "%s.%012llx";
	if (rbd_dev->image_format == 2)
		name_format = "%s.%016llx";
	ret = snprintf(name, CEPH_MAX_OID_NAME_LEN + 1, name_format,
			rbd_dev->header.object_prefix, segment);
	if (ret < 0 || ret > CEPH_MAX_OID_NAME_LEN) {
		pr_err("error formatting segment name for #%llu (%d)\n",
			segment, ret);
		rbd_segment_name_free(name);
		name = NULL;
	}

	return name;
}
static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset)
{
	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

	return offset & (segment_size - 1);
}

static u64 rbd_segment_length(struct rbd_device *rbd_dev,
				u64 offset, u64 length)
{
	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

	offset &= segment_size - 1;

	rbd_assert(length <= U64_MAX - offset);
	if (offset + length > segment_size)
		length = segment_size - offset;

	return length;
}
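
/*
 * Worked example (assuming obj_order == 22, i.e. 4 MiB objects): an
 * image offset of 5 MiB falls in segment 1 at in-object offset 1 MiB,
 * and a request of 8 MiB starting there is clamped to the 3 MiB that
 * remain in that object; callers issue the remainder against the
 * following segments.  For a format 2 image that object would be
 * named "<object_prefix>.0000000000000001".
 */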
/*
 * returns the size of an object in the image
 */
static u64 rbd_obj_bytes(struct rbd_image_header *header)
{
	return 1 << header->obj_order;
}
static void bio_chain_put(struct bio *chain)
{
	struct bio *tmp;

	while (chain) {
		tmp = chain;
		chain = chain->bi_next;
		bio_put(tmp);
	}
}
/*
 * zeros a bio chain, starting at specific offset
 */
static void zero_bio_chain(struct bio *chain, int start_ofs)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	unsigned long flags;
	void *buf;
	int pos = 0;

	while (chain) {
		bio_for_each_segment(bv, chain, iter) {
			if (pos + bv.bv_len > start_ofs) {
				int remainder = max(start_ofs - pos, 0);
				buf = bvec_kmap_irq(&bv, &flags);
				memset(buf + remainder, 0,
				       bv.bv_len - remainder);
				flush_dcache_page(bv.bv_page);
				bvec_kunmap_irq(buf, &flags);
			}
			pos += bv.bv_len;
		}

		chain = chain->bi_next;
	}
}
/*
 * similar to zero_bio_chain(), zeros data defined by a page array,
 * starting at the given byte offset from the start of the array and
 * continuing up to the given end offset.  The pages array is
 * assumed to be big enough to hold all bytes up to the end.
 */
static void zero_pages(struct page **pages, u64 offset, u64 end)
{
	struct page **page = &pages[offset >> PAGE_SHIFT];

	rbd_assert(end > offset);
	rbd_assert(end - offset <= (u64)SIZE_MAX);
	while (offset < end) {
		size_t page_offset;
		size_t length;
		unsigned long flags;
		void *kaddr;

		page_offset = offset & ~PAGE_MASK;
		length = min_t(size_t, PAGE_SIZE - page_offset, end - offset);
		local_irq_save(flags);
		kaddr = kmap_atomic(*page);
		memset(kaddr + page_offset, 0, length);
		flush_dcache_page(*page);
		kunmap_atomic(kaddr);
		local_irq_restore(flags);

		offset += length;
		page++;
	}
}
/*
 * Clone a portion of a bio, starting at the given byte offset
 * and continuing for the number of bytes indicated.
 */
static struct bio *bio_clone_range(struct bio *bio_src,
					unsigned int offset,
					unsigned int len,
					gfp_t gfpmask)
{
	struct bio *bio;

	bio = bio_clone(bio_src, gfpmask);
	if (!bio)
		return NULL;	/* ENOMEM */

	bio_advance(bio, offset);
	bio->bi_iter.bi_size = len;

	return bio;
}
/*
 * Clone a portion of a bio chain, starting at the given byte offset
 * into the first bio in the source chain and continuing for the
 * number of bytes indicated.  The result is another bio chain of
 * exactly the given length, or a null pointer on error.
 *
 * The bio_src and offset parameters are both in-out.  On entry they
 * refer to the first source bio and the offset into that bio where
 * the start of data to be cloned is located.
 *
 * On return, bio_src is updated to refer to the bio in the source
 * chain that contains the first un-cloned byte, and *offset will
 * contain the offset of that byte within that bio.
 */
static struct bio *bio_chain_clone_range(struct bio **bio_src,
					unsigned int *offset,
					unsigned int len,
					gfp_t gfpmask)
{
	struct bio *bi = *bio_src;
	unsigned int off = *offset;
	struct bio *chain = NULL;
	struct bio **end;

	/* Build up a chain of clone bios up to the limit */

	if (!bi || off >= bi->bi_iter.bi_size || !len)
		return NULL;		/* Nothing to clone */

	end = &chain;
	while (len) {
		unsigned int bi_size;
		struct bio *bio;

		if (!bi) {
			rbd_warn(NULL, "bio_chain exhausted with %u left", len);
			goto out_err;	/* EINVAL; ran out of bio's */
		}
		bi_size = min_t(unsigned int, bi->bi_iter.bi_size - off, len);
		bio = bio_clone_range(bi, off, bi_size, gfpmask);
		if (!bio)
			goto out_err;	/* ENOMEM */

		*end = bio;
		end = &bio->bi_next;

		off += bi_size;
		if (off == bi->bi_iter.bi_size) {
			bi = bi->bi_next;
			off = 0;
		}
		len -= bi_size;
	}
	*bio_src = bi;
	*offset = off;

	return chain;
out_err:
	bio_chain_put(chain);

	return NULL;
}
/*
 * The default/initial value for all object request flags is 0.  For
 * each flag, once its value is set to 1 it is never reset to 0
 * again.
 */
static void obj_request_img_data_set(struct rbd_obj_request *obj_request)
{
	if (test_and_set_bit(OBJ_REQ_IMG_DATA, &obj_request->flags)) {
		struct rbd_device *rbd_dev;

		rbd_dev = obj_request->img_request->rbd_dev;
		rbd_warn(rbd_dev, "obj_request %p already marked img_data",
			obj_request);
	}
}

static bool obj_request_img_data_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_IMG_DATA, &obj_request->flags) != 0;
}
static void obj_request_done_set(struct rbd_obj_request *obj_request)
{
	if (test_and_set_bit(OBJ_REQ_DONE, &obj_request->flags)) {
		struct rbd_device *rbd_dev = NULL;

		if (obj_request_img_data_test(obj_request))
			rbd_dev = obj_request->img_request->rbd_dev;
		rbd_warn(rbd_dev, "obj_request %p already marked done",
			obj_request);
	}
}

static bool obj_request_done_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_DONE, &obj_request->flags) != 0;
}
/*
 * This sets the KNOWN flag after (possibly) setting the EXISTS
 * flag.  The latter is set based on the "exists" value provided.
 *
 * Note that for our purposes once an object exists it never goes
 * away again.  It's possible that the response from two existence
 * checks are separated by the creation of the target object, and
 * the first ("doesn't exist") response arrives *after* the second
 * ("does exist").  In that case we ignore the second one.
 */
static void obj_request_existence_set(struct rbd_obj_request *obj_request,
				bool exists)
{
	if (exists)
		set_bit(OBJ_REQ_EXISTS, &obj_request->flags);
	set_bit(OBJ_REQ_KNOWN, &obj_request->flags);
	smp_mb();
}

static bool obj_request_known_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_KNOWN, &obj_request->flags) != 0;
}

static bool obj_request_exists_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_EXISTS, &obj_request->flags) != 0;
}
static bool obj_request_overlaps_parent(struct rbd_obj_request *obj_request)
{
	struct rbd_device *rbd_dev = obj_request->img_request->rbd_dev;

	return obj_request->img_offset <
	    round_up(rbd_dev->parent_overlap, rbd_obj_bytes(&rbd_dev->header));
}
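
/*
 * Worked example (hypothetical numbers): with 4 MiB objects and a
 * parent overlap of 6 MiB, round_up() yields 8 MiB, so an object
 * request at img_offset 7 MiB still counts as overlapping: the
 * object spanning image range 4..8 MiB is backed by parent data in
 * its first 2 MiB (image range 4..6 MiB).
 */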
static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		atomic_read(&obj_request->kref.refcount));
	kref_get(&obj_request->kref);
}

static void rbd_obj_request_destroy(struct kref *kref);
static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request != NULL);
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		atomic_read(&obj_request->kref.refcount));
	kref_put(&obj_request->kref, rbd_obj_request_destroy);
}
static void rbd_img_request_get(struct rbd_img_request *img_request)
{
	dout("%s: img %p (was %d)\n", __func__, img_request,
	     atomic_read(&img_request->kref.refcount));
	kref_get(&img_request->kref);
}

static bool img_request_child_test(struct rbd_img_request *img_request);
static void rbd_parent_request_destroy(struct kref *kref);
static void rbd_img_request_destroy(struct kref *kref);
static void rbd_img_request_put(struct rbd_img_request *img_request)
{
	rbd_assert(img_request != NULL);
	dout("%s: img %p (was %d)\n", __func__, img_request,
		atomic_read(&img_request->kref.refcount));
	if (img_request_child_test(img_request))
		kref_put(&img_request->kref, rbd_parent_request_destroy);
	else
		kref_put(&img_request->kref, rbd_img_request_destroy);
}
static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->img_request == NULL);

	/* Image request now owns object's original reference */
	obj_request->img_request = img_request;
	obj_request->which = img_request->obj_request_count;
	rbd_assert(!obj_request_img_data_test(obj_request));
	obj_request_img_data_set(obj_request);
	rbd_assert(obj_request->which != BAD_WHICH);
	img_request->obj_request_count++;
	list_add_tail(&obj_request->links, &img_request->obj_requests);
	dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
		obj_request->which);
}
static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->which != BAD_WHICH);

	dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
		obj_request->which);
	list_del(&obj_request->links);
	rbd_assert(img_request->obj_request_count > 0);
	img_request->obj_request_count--;
	rbd_assert(obj_request->which == img_request->obj_request_count);
	obj_request->which = BAD_WHICH;
	rbd_assert(obj_request_img_data_test(obj_request));
	rbd_assert(obj_request->img_request == img_request);
	obj_request->img_request = NULL;
	obj_request->callback = NULL;
	rbd_obj_request_put(obj_request);
}
static bool obj_request_type_valid(enum obj_request_type type)
{
	switch (type) {
	case OBJ_REQUEST_NODATA:
	case OBJ_REQUEST_BIO:
	case OBJ_REQUEST_PAGES:
		return true;
	default:
		return false;
	}
}
static void rbd_img_obj_callback(struct rbd_obj_request *obj_request);

static void rbd_obj_request_submit(struct rbd_obj_request *obj_request)
{
	struct ceph_osd_request *osd_req = obj_request->osd_req;

	dout("%s %p osd_req %p\n", __func__, obj_request, osd_req);
	if (obj_request_img_data_test(obj_request)) {
		WARN_ON(obj_request->callback != rbd_img_obj_callback);
		rbd_img_request_get(obj_request->img_request);
	}
	ceph_osdc_start_request(osd_req->r_osdc, osd_req, false);
}
static void rbd_obj_request_end(struct rbd_obj_request *obj_request)
{
	dout("%s %p\n", __func__, obj_request);
	ceph_osdc_cancel_request(obj_request->osd_req);
}
/*
 * Wait for an object request to complete.  If interrupted, cancel the
 * underlying osd request.
 *
 * @timeout: in jiffies, 0 means "wait forever"
 */
static int __rbd_obj_request_wait(struct rbd_obj_request *obj_request,
				  unsigned long timeout)
{
	long ret;

	dout("%s %p\n", __func__, obj_request);
	ret = wait_for_completion_interruptible_timeout(
					&obj_request->completion,
					ceph_timeout_jiffies(timeout));
	if (ret <= 0) {
		if (ret == 0)
			ret = -ETIMEDOUT;
		rbd_obj_request_end(obj_request);
	} else {
		ret = 0;
	}

	dout("%s %p ret %d\n", __func__, obj_request, (int)ret);
	return ret;
}

static int rbd_obj_request_wait(struct rbd_obj_request *obj_request)
{
	return __rbd_obj_request_wait(obj_request, 0);
}
static void rbd_img_request_complete(struct rbd_img_request *img_request)
{

	dout("%s: img %p\n", __func__, img_request);

	/*
	 * If no error occurred, compute the aggregate transfer
	 * count for the image request.  We could instead use
	 * atomic64_cmpxchg() to update it as each object request
	 * completes; not clear which way is better off hand.
	 */
	if (!img_request->result) {
		struct rbd_obj_request *obj_request;
		u64 xferred = 0;

		for_each_obj_request(img_request, obj_request)
			xferred += obj_request->xferred;
		img_request->xferred = xferred;
	}

	if (img_request->callback)
		img_request->callback(img_request);
	else
		rbd_img_request_put(img_request);
}
/*
 * The default/initial value for all image request flags is 0.  Each
 * is conditionally set to 1 at image request initialization time
 * and currently never changes thereafter.
 */
static void img_request_write_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_WRITE, &img_request->flags);
	smp_mb();
}

static bool img_request_write_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_WRITE, &img_request->flags) != 0;
}

/*
 * Set the discard flag when the img_request is a discard request
 */
static void img_request_discard_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_DISCARD, &img_request->flags);
	smp_mb();
}

static bool img_request_discard_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_DISCARD, &img_request->flags) != 0;
}

static void img_request_child_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_CHILD, &img_request->flags);
	smp_mb();
}

static void img_request_child_clear(struct rbd_img_request *img_request)
{
	clear_bit(IMG_REQ_CHILD, &img_request->flags);
	smp_mb();
}

static bool img_request_child_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_CHILD, &img_request->flags) != 0;
}

static void img_request_layered_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

static void img_request_layered_clear(struct rbd_img_request *img_request)
{
	clear_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

static bool img_request_layered_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
}

static enum obj_operation_type
rbd_img_request_op_type(struct rbd_img_request *img_request)
{
	if (img_request_write_test(img_request))
		return OBJ_OP_WRITE;
	else if (img_request_discard_test(img_request))
		return OBJ_OP_DISCARD;
	else
		return OBJ_OP_READ;
}
static void
rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
{
	u64 xferred = obj_request->xferred;
	u64 length = obj_request->length;

	dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
		obj_request, obj_request->img_request, obj_request->result,
		xferred, length);
	/*
	 * ENOENT means a hole in the image.  We zero-fill the entire
	 * length of the request.  A short read also implies zero-fill
	 * to the end of the request.  An error requires the whole
	 * length of the request to be reported finished with an error
	 * to the block layer.  In each case we update the xferred
	 * count to indicate the whole request was satisfied.
	 */
	rbd_assert(obj_request->type != OBJ_REQUEST_NODATA);
	if (obj_request->result == -ENOENT) {
		if (obj_request->type == OBJ_REQUEST_BIO)
			zero_bio_chain(obj_request->bio_list, 0);
		else
			zero_pages(obj_request->pages, 0, length);
		obj_request->result = 0;
	} else if (xferred < length && !obj_request->result) {
		if (obj_request->type == OBJ_REQUEST_BIO)
			zero_bio_chain(obj_request->bio_list, xferred);
		else
			zero_pages(obj_request->pages, xferred, length);
	}
	obj_request->xferred = length;
	obj_request_done_set(obj_request);
}
static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p cb %p\n", __func__, obj_request,
		obj_request->callback);
	if (obj_request->callback)
		obj_request->callback(obj_request);
	else
		complete_all(&obj_request->completion);
}
static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = NULL;
	struct rbd_device *rbd_dev = NULL;
	bool layered = false;

	if (obj_request_img_data_test(obj_request)) {
		img_request = obj_request->img_request;
		layered = img_request && img_request_layered_test(img_request);
		rbd_dev = img_request->rbd_dev;
	}

	dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
		obj_request, img_request, obj_request->result,
		obj_request->xferred, obj_request->length);
	if (layered && obj_request->result == -ENOENT &&
			obj_request->img_offset < rbd_dev->parent_overlap)
		rbd_img_parent_read(obj_request);
	else if (img_request)
		rbd_img_obj_request_read_callback(obj_request);
	else
		obj_request_done_set(obj_request);
}
static void rbd_osd_write_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p result %d %llu\n", __func__, obj_request,
		obj_request->result, obj_request->length);
	/*
	 * There is no such thing as a successful short write.  Set
	 * it to our originally-requested length.
	 */
	obj_request->xferred = obj_request->length;
	obj_request_done_set(obj_request);
}

static void rbd_osd_discard_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p result %d %llu\n", __func__, obj_request,
		obj_request->result, obj_request->length);
	/*
	 * There is no such thing as a successful short discard.  Set
	 * it to our originally-requested length.
	 */
	obj_request->xferred = obj_request->length;
	/* discarding a non-existent object is not a problem */
	if (obj_request->result == -ENOENT)
		obj_request->result = 0;
	obj_request_done_set(obj_request);
}
/*
 * For a simple stat call there's nothing to do.  We'll do more if
 * this is part of a write sequence for a layered image.
 */
static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);
	obj_request_done_set(obj_request);
}

static void rbd_osd_call_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);

	if (obj_request_img_data_test(obj_request))
		rbd_osd_copyup_callback(obj_request);
	else
		obj_request_done_set(obj_request);
}
static void rbd_osd_req_callback(struct ceph_osd_request *osd_req)
{
	struct rbd_obj_request *obj_request = osd_req->r_priv;
	u16 opcode;

	dout("%s: osd_req %p\n", __func__, osd_req);
	rbd_assert(osd_req == obj_request->osd_req);
	if (obj_request_img_data_test(obj_request)) {
		rbd_assert(obj_request->img_request);
		rbd_assert(obj_request->which != BAD_WHICH);
	} else {
		rbd_assert(obj_request->which == BAD_WHICH);
	}

	if (osd_req->r_result < 0)
		obj_request->result = osd_req->r_result;

	/*
	 * We support a 64-bit length, but ultimately it has to be
	 * passed to the block layer, which just supports a 32-bit
	 * length field.
	 */
	obj_request->xferred = osd_req->r_ops[0].outdata_len;
	rbd_assert(obj_request->xferred < (u64)UINT_MAX);

	opcode = osd_req->r_ops[0].op;
	switch (opcode) {
	case CEPH_OSD_OP_READ:
		rbd_osd_read_callback(obj_request);
		break;
	case CEPH_OSD_OP_SETALLOCHINT:
		rbd_assert(osd_req->r_ops[1].op == CEPH_OSD_OP_WRITE ||
			   osd_req->r_ops[1].op == CEPH_OSD_OP_WRITEFULL);
		/* fall through */
	case CEPH_OSD_OP_WRITE:
	case CEPH_OSD_OP_WRITEFULL:
		rbd_osd_write_callback(obj_request);
		break;
	case CEPH_OSD_OP_STAT:
		rbd_osd_stat_callback(obj_request);
		break;
	case CEPH_OSD_OP_DELETE:
	case CEPH_OSD_OP_TRUNCATE:
	case CEPH_OSD_OP_ZERO:
		rbd_osd_discard_callback(obj_request);
		break;
	case CEPH_OSD_OP_CALL:
		rbd_osd_call_callback(obj_request);
		break;
	default:
		rbd_warn(NULL, "%s: unsupported op %hu",
			obj_request->object_name, (unsigned short) opcode);
		break;
	}

	if (obj_request_done_test(obj_request))
		rbd_obj_request_complete(obj_request);
}
static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request)
{
	struct ceph_osd_request *osd_req = obj_request->osd_req;

	rbd_assert(obj_request_img_data_test(obj_request));
	osd_req->r_snapid = obj_request->img_request->snap_id;
}

static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
{
	struct ceph_osd_request *osd_req = obj_request->osd_req;

	osd_req->r_mtime = CURRENT_TIME;
	osd_req->r_data_offset = obj_request->offset;
}
/*
 * Create an osd request.  A read request has one osd op (read).
 * A write request has either one (watch) or two (hint+write) osd ops.
 * (All rbd data writes are prefixed with an allocation hint op, but
 * technically osd watch is a write request, hence this distinction.)
 */
static struct ceph_osd_request *rbd_osd_req_create(
					struct rbd_device *rbd_dev,
					enum obj_operation_type op_type,
					unsigned int num_ops,
					struct rbd_obj_request *obj_request)
{
	struct ceph_snap_context *snapc = NULL;
	struct ceph_osd_client *osdc;
	struct ceph_osd_request *osd_req;

	if (obj_request_img_data_test(obj_request) &&
		(op_type == OBJ_OP_DISCARD || op_type == OBJ_OP_WRITE)) {
		struct rbd_img_request *img_request = obj_request->img_request;
		if (op_type == OBJ_OP_WRITE) {
			rbd_assert(img_request_write_test(img_request));
		} else {
			rbd_assert(img_request_discard_test(img_request));
		}
		snapc = img_request->snapc;
	}

	rbd_assert(num_ops == 1 || ((op_type == OBJ_OP_WRITE) && num_ops == 2));

	/* Allocate and initialize the request, for the num_ops ops */

	osdc = &rbd_dev->rbd_client->client->osdc;
	osd_req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false,
					  GFP_NOIO);
	if (!osd_req)
		goto fail;

	if (op_type == OBJ_OP_WRITE || op_type == OBJ_OP_DISCARD)
		osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
	else
		osd_req->r_flags = CEPH_OSD_FLAG_READ;

	osd_req->r_callback = rbd_osd_req_callback;
	osd_req->r_priv = obj_request;

	osd_req->r_base_oloc.pool = rbd_dev->layout.pool_id;
	if (ceph_oid_aprintf(&osd_req->r_base_oid, GFP_NOIO, "%s",
			obj_request->object_name))
		goto fail;

	if (ceph_osdc_alloc_messages(osd_req, GFP_NOIO))
		goto fail;

	return osd_req;

fail:
	ceph_osdc_put_request(osd_req);
	return NULL;
}
/*
 * Create a copyup osd request based on the information in the object
 * request supplied.  A copyup request has two or three osd ops, a
 * copyup method call, potentially a hint op, and a write or truncate
 * or zero op.
 */
static struct ceph_osd_request *
rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	struct ceph_snap_context *snapc;
	struct rbd_device *rbd_dev;
	struct ceph_osd_client *osdc;
	struct ceph_osd_request *osd_req;
	int num_osd_ops = 3;

	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;
	rbd_assert(img_request);
	rbd_assert(img_request_write_test(img_request) ||
			img_request_discard_test(img_request));

	if (img_request_discard_test(img_request))
		num_osd_ops = 2;

	/* Allocate and initialize the request, for all the ops */

	snapc = img_request->snapc;
	rbd_dev = img_request->rbd_dev;
	osdc = &rbd_dev->rbd_client->client->osdc;
	osd_req = ceph_osdc_alloc_request(osdc, snapc, num_osd_ops,
						false, GFP_NOIO);
	if (!osd_req)
		goto fail;

	osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
	osd_req->r_callback = rbd_osd_req_callback;
	osd_req->r_priv = obj_request;

	osd_req->r_base_oloc.pool = rbd_dev->layout.pool_id;
	if (ceph_oid_aprintf(&osd_req->r_base_oid, GFP_NOIO, "%s",
			obj_request->object_name))
		goto fail;

	if (ceph_osdc_alloc_messages(osd_req, GFP_NOIO))
		goto fail;

	return osd_req;

fail:
	ceph_osdc_put_request(osd_req);
	return NULL;
}

static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
{
	ceph_osdc_put_request(osd_req);
}
/* object_name is assumed to be a non-null pointer and NUL-terminated */

static struct rbd_obj_request *rbd_obj_request_create(const char *object_name,
						u64 offset, u64 length,
						enum obj_request_type type)
{
	struct rbd_obj_request *obj_request;
	size_t size;
	char *name;

	rbd_assert(obj_request_type_valid(type));

	size = strlen(object_name) + 1;
	name = kmalloc(size, GFP_NOIO);
	if (!name)
		return NULL;

	obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_NOIO);
	if (!obj_request) {
		kfree(name);
		return NULL;
	}

	obj_request->object_name = memcpy(name, object_name, size);
	obj_request->offset = offset;
	obj_request->length = length;
	obj_request->flags = 0;
	obj_request->which = BAD_WHICH;
	obj_request->type = type;
	INIT_LIST_HEAD(&obj_request->links);
	init_completion(&obj_request->completion);
	kref_init(&obj_request->kref);

	dout("%s: \"%s\" %llu/%llu %d -> obj %p\n", __func__, object_name,
		offset, length, (int)type, obj_request);

	return obj_request;
}
static void rbd_obj_request_destroy(struct kref *kref)
{
	struct rbd_obj_request *obj_request;

	obj_request = container_of(kref, struct rbd_obj_request, kref);

	dout("%s: obj %p\n", __func__, obj_request);

	rbd_assert(obj_request->img_request == NULL);
	rbd_assert(obj_request->which == BAD_WHICH);

	if (obj_request->osd_req)
		rbd_osd_req_destroy(obj_request->osd_req);

	rbd_assert(obj_request_type_valid(obj_request->type));
	switch (obj_request->type) {
	case OBJ_REQUEST_NODATA:
		break;		/* Nothing to do */
	case OBJ_REQUEST_BIO:
		if (obj_request->bio_list)
			bio_chain_put(obj_request->bio_list);
		break;
	case OBJ_REQUEST_PAGES:
		/* img_data requests don't own their page array */
		if (obj_request->pages &&
		    !obj_request_img_data_test(obj_request))
			ceph_release_page_vector(obj_request->pages,
						obj_request->page_count);
		break;
	}

	kfree(obj_request->object_name);
	obj_request->object_name = NULL;
	kmem_cache_free(rbd_obj_request_cache, obj_request);
}
/* It's OK to call this for a device with no parent */

static void rbd_spec_put(struct rbd_spec *spec);
static void rbd_dev_unparent(struct rbd_device *rbd_dev)
{
	rbd_dev_remove_parent(rbd_dev);
	rbd_spec_put(rbd_dev->parent_spec);
	rbd_dev->parent_spec = NULL;
	rbd_dev->parent_overlap = 0;
}
/*
 * Parent image reference counting is used to determine when an
 * image's parent fields can be safely torn down--after there are no
 * more in-flight requests to the parent image.  When the last
 * reference is dropped, cleaning them up is safe.
 */
static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
{
	int counter;

	if (!rbd_dev->parent_spec)
		return;

	counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
	if (counter > 0)
		return;

	/* Last reference; clean up parent data structures */

	if (!counter)
		rbd_dev_unparent(rbd_dev);
	else
		rbd_warn(rbd_dev, "parent reference underflow");
}
/*
 * If an image has a non-zero parent overlap, get a reference to its
 * parent.
 *
 * Returns true if the rbd device has a parent with a non-zero
 * overlap and a reference for it was successfully taken, or
 * false otherwise.
 */
static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
{
	int counter = 0;

	if (!rbd_dev->parent_spec)
		return false;

	down_read(&rbd_dev->header_rwsem);
	if (rbd_dev->parent_overlap)
		counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
	up_read(&rbd_dev->header_rwsem);

	if (counter < 0)
		rbd_warn(rbd_dev, "parent reference overflow");

	return counter > 0;
}
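
/*
 * Typical pairing, as used below: a successful rbd_dev_parent_get()
 * in rbd_img_request_create() marks the image request layered, and
 * rbd_img_request_destroy() balances it with rbd_dev_parent_put();
 * when the last in-flight request drops the reference, the parent
 * fields are torn down.
 */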
/*
 * Caller is responsible for filling in the list of object requests
 * that comprises the image request, and the Linux request pointer
 * (if there is one).
 */
static struct rbd_img_request *rbd_img_request_create(
					struct rbd_device *rbd_dev,
					u64 offset, u64 length,
					enum obj_operation_type op_type,
					struct ceph_snap_context *snapc)
{
	struct rbd_img_request *img_request;

	img_request = kmem_cache_alloc(rbd_img_request_cache, GFP_NOIO);
	if (!img_request)
		return NULL;

	img_request->rq = NULL;
	img_request->rbd_dev = rbd_dev;
	img_request->offset = offset;
	img_request->length = length;
	img_request->flags = 0;
	if (op_type == OBJ_OP_DISCARD) {
		img_request_discard_set(img_request);
		img_request->snapc = snapc;
	} else if (op_type == OBJ_OP_WRITE) {
		img_request_write_set(img_request);
		img_request->snapc = snapc;
	} else {
		img_request->snap_id = rbd_dev->spec->snap_id;
	}
	if (rbd_dev_parent_get(rbd_dev))
		img_request_layered_set(img_request);
	spin_lock_init(&img_request->completion_lock);
	img_request->next_completion = 0;
	img_request->callback = NULL;
	img_request->result = 0;
	img_request->obj_request_count = 0;
	INIT_LIST_HEAD(&img_request->obj_requests);
	kref_init(&img_request->kref);

	dout("%s: rbd_dev %p %s %llu/%llu -> img %p\n", __func__, rbd_dev,
		obj_op_name(op_type), offset, length, img_request);

	return img_request;
}
static void rbd_img_request_destroy(struct kref *kref)
{
	struct rbd_img_request *img_request;
	struct rbd_obj_request *obj_request;
	struct rbd_obj_request *next_obj_request;

	img_request = container_of(kref, struct rbd_img_request, kref);

	dout("%s: img %p\n", __func__, img_request);

	for_each_obj_request_safe(img_request, obj_request, next_obj_request)
		rbd_img_obj_request_del(img_request, obj_request);
	rbd_assert(img_request->obj_request_count == 0);

	if (img_request_layered_test(img_request)) {
		img_request_layered_clear(img_request);
		rbd_dev_parent_put(img_request->rbd_dev);
	}

	if (img_request_write_test(img_request) ||
		img_request_discard_test(img_request))
		ceph_put_snap_context(img_request->snapc);

	kmem_cache_free(rbd_img_request_cache, img_request);
}
2298 static struct rbd_img_request *rbd_parent_request_create(
2299 struct rbd_obj_request *obj_request,
2300 u64 img_offset, u64 length)
2302 struct rbd_img_request *parent_request;
2303 struct rbd_device *rbd_dev;
2305 rbd_assert(obj_request->img_request);
2306 rbd_dev = obj_request->img_request->rbd_dev;
2308 parent_request = rbd_img_request_create(rbd_dev->parent, img_offset,
2309 length, OBJ_OP_READ, NULL);
2310 if (!parent_request)
2313 img_request_child_set(parent_request);
2314 rbd_obj_request_get(obj_request);
2315 parent_request->obj_request = obj_request;
2317 return parent_request;
2320 static void rbd_parent_request_destroy(struct kref *kref)
2322 struct rbd_img_request *parent_request;
2323 struct rbd_obj_request *orig_request;
2325 parent_request = container_of(kref, struct rbd_img_request, kref);
2326 orig_request = parent_request->obj_request;
2328 parent_request->obj_request = NULL;
2329 rbd_obj_request_put(orig_request);
2330 img_request_child_clear(parent_request);
2332 rbd_img_request_destroy(kref);
2335 static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
2337 struct rbd_img_request *img_request;
2338 unsigned int xferred;
2342 rbd_assert(obj_request_img_data_test(obj_request));
2343 img_request = obj_request->img_request;
2345 rbd_assert(obj_request->xferred <= (u64)UINT_MAX);
2346 xferred = (unsigned int)obj_request->xferred;
2347 result = obj_request->result;
2349 struct rbd_device *rbd_dev = img_request->rbd_dev;
2350 enum obj_operation_type op_type;
2352 if (img_request_discard_test(img_request))
2353 op_type = OBJ_OP_DISCARD;
2354 else if (img_request_write_test(img_request))
2355 op_type = OBJ_OP_WRITE;
2357 op_type = OBJ_OP_READ;
2359 rbd_warn(rbd_dev, "%s %llx at %llx (%llx)",
2360 obj_op_name(op_type), obj_request->length,
2361 obj_request->img_offset, obj_request->offset);
2362 rbd_warn(rbd_dev, " result %d xferred %x",
2364 if (!img_request->result)
2365 img_request->result = result;
2367 * Need to end I/O on the entire obj_request worth of
2368 * bytes in case of error.
2370 xferred = obj_request->length;
2373 if (img_request_child_test(img_request)) {
2374 rbd_assert(img_request->obj_request != NULL);
2375 more = obj_request->which < img_request->obj_request_count - 1;
2377 rbd_assert(img_request->rq != NULL);
2379 more = blk_update_request(img_request->rq, result, xferred);
2381 __blk_mq_end_request(img_request->rq, result);
2387 static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
2389 struct rbd_img_request *img_request;
2390 u32 which = obj_request->which;
2393 rbd_assert(obj_request_img_data_test(obj_request));
2394 img_request = obj_request->img_request;
2396 dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
2397 rbd_assert(img_request != NULL);
2398 rbd_assert(img_request->obj_request_count > 0);
2399 rbd_assert(which != BAD_WHICH);
2400 rbd_assert(which < img_request->obj_request_count);
2402 spin_lock_irq(&img_request->completion_lock);
2403 if (which != img_request->next_completion)
2406 for_each_obj_request_from(img_request, obj_request) {
2408 rbd_assert(which < img_request->obj_request_count);
2410 if (!obj_request_done_test(obj_request))
2412 more = rbd_img_obj_end_request(obj_request);
2416 rbd_assert(more ^ (which == img_request->obj_request_count));
2417 img_request->next_completion = which;
2419 spin_unlock_irq(&img_request->completion_lock);
2420 rbd_img_request_put(img_request);
2423 rbd_img_request_complete(img_request);
2427 * Add individual osd ops to the given ceph_osd_request and prepare
2428 * them for submission. num_ops is the current number of
2429 * osd operations already attached to the object request.
2431 static void rbd_img_obj_request_fill(struct rbd_obj_request *obj_request,
2432 struct ceph_osd_request *osd_request,
2433 enum obj_operation_type op_type,
2434 unsigned int num_ops)
2436 struct rbd_img_request *img_request = obj_request->img_request;
2437 struct rbd_device *rbd_dev = img_request->rbd_dev;
2438 u64 object_size = rbd_obj_bytes(&rbd_dev->header);
2439 u64 offset = obj_request->offset;
2440 u64 length = obj_request->length;
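	/*
	 * Opcode selection, as implemented below: a whole-object
	 * discard that cannot expose parent data becomes DELETE, a
	 * discard that reaches the end of the object (or of the image)
	 * becomes TRUNCATE, and any other discard becomes ZERO.
	 * Writes add an allocation hint op first, then use WRITEFULL
	 * for a whole-object write or WRITE otherwise; everything else
	 * is a plain READ.
	 */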
2444 if (op_type == OBJ_OP_DISCARD) {
2445 if (!offset && length == object_size &&
2446 (!img_request_layered_test(img_request) ||
2447 !obj_request_overlaps_parent(obj_request))) {
2448 opcode = CEPH_OSD_OP_DELETE;
2449 } else if (offset + length == object_size) {
2450 opcode = CEPH_OSD_OP_TRUNCATE;
2452 down_read(&rbd_dev->header_rwsem);
2453 img_end = rbd_dev->header.image_size;
2454 up_read(&rbd_dev->header_rwsem);
2456 if (obj_request->img_offset + length == img_end)
2457 opcode = CEPH_OSD_OP_TRUNCATE;
2459 opcode = CEPH_OSD_OP_ZERO;
2461 } else if (op_type == OBJ_OP_WRITE) {
2462 if (!offset && length == object_size)
2463 opcode = CEPH_OSD_OP_WRITEFULL;
2465 opcode = CEPH_OSD_OP_WRITE;
2466 osd_req_op_alloc_hint_init(osd_request, num_ops,
2467 object_size, object_size);
2470 opcode = CEPH_OSD_OP_READ;
2473 if (opcode == CEPH_OSD_OP_DELETE)
2474 osd_req_op_init(osd_request, num_ops, opcode, 0);
2476 osd_req_op_extent_init(osd_request, num_ops, opcode,
2477 offset, length, 0, 0);
2479 if (obj_request->type == OBJ_REQUEST_BIO)
2480 osd_req_op_extent_osd_data_bio(osd_request, num_ops,
2481 obj_request->bio_list, length);
2482 else if (obj_request->type == OBJ_REQUEST_PAGES)
2483 osd_req_op_extent_osd_data_pages(osd_request, num_ops,
2484 obj_request->pages, length,
2485 offset & ~PAGE_MASK, false, false);
2487 /* Discards are also writes */
2488 if (op_type == OBJ_OP_WRITE || op_type == OBJ_OP_DISCARD)
2489 rbd_osd_req_format_write(obj_request);
2491 rbd_osd_req_format_read(obj_request);
2495 * Split up an image request into one or more object requests, each
2496 * to a different object. The "type" parameter indicates whether
2497 * "data_desc" is the pointer to the head of a list of bio
2498 * structures, or the base of a page array. In either case this
2499 * function assumes data_desc describes memory sufficient to hold
2500 * all data described by the image request.
2502 static int rbd_img_request_fill(struct rbd_img_request *img_request,
2503 enum obj_request_type type,
2506 struct rbd_device *rbd_dev = img_request->rbd_dev;
2507 struct rbd_obj_request *obj_request = NULL;
2508 struct rbd_obj_request *next_obj_request;
2509 struct bio *bio_list = NULL;
2510 unsigned int bio_offset = 0;
2511 struct page **pages = NULL;
2512 enum obj_operation_type op_type;
2516 dout("%s: img %p type %d data_desc %p\n", __func__, img_request,
2517 (int)type, data_desc);
2519 img_offset = img_request->offset;
2520 resid = img_request->length;
2521 rbd_assert(resid > 0);
2522 op_type = rbd_img_request_op_type(img_request);
2524 if (type == OBJ_REQUEST_BIO) {
2525 bio_list = data_desc;
2526 rbd_assert(img_offset ==
2527 bio_list->bi_iter.bi_sector << SECTOR_SHIFT);
2528 } else if (type == OBJ_REQUEST_PAGES) {
2533 struct ceph_osd_request *osd_req;
2534 const char *object_name;
2538 object_name = rbd_segment_name(rbd_dev, img_offset);
2541 offset = rbd_segment_offset(rbd_dev, img_offset);
2542 length = rbd_segment_length(rbd_dev, img_offset, resid);
2543 obj_request = rbd_obj_request_create(object_name,
2544 offset, length, type);
2545 /* object request has its own copy of the object name */
2546 rbd_segment_name_free(object_name);
2551 * set obj_request->img_request before creating the
2552 * osd_request so that it gets the right snapc
2554 rbd_img_obj_request_add(img_request, obj_request);
2556 if (type == OBJ_REQUEST_BIO) {
2557 unsigned int clone_size;
2559 rbd_assert(length <= (u64)UINT_MAX);
2560 clone_size = (unsigned int)length;
2561 obj_request->bio_list =
2562 bio_chain_clone_range(&bio_list,
2566 if (!obj_request->bio_list)
2568 } else if (type == OBJ_REQUEST_PAGES) {
2569 unsigned int page_count;
2571 obj_request->pages = pages;
2572 page_count = (u32)calc_pages_for(offset, length);
2573 obj_request->page_count = page_count;
2574 if ((offset + length) & ~PAGE_MASK)
2575 page_count--; /* more on last page */
2576 pages += page_count;
2579 osd_req = rbd_osd_req_create(rbd_dev, op_type,
2580 (op_type == OBJ_OP_WRITE) ? 2 : 1,
2585 obj_request->osd_req = osd_req;
2586 obj_request->callback = rbd_img_obj_callback;
2587 obj_request->img_offset = img_offset;
2589 rbd_img_obj_request_fill(obj_request, osd_req, op_type, 0);
2591 img_offset += length;
2598 for_each_obj_request_safe(img_request, obj_request, next_obj_request)
2599 rbd_img_obj_request_del(img_request, obj_request);
2605 rbd_osd_copyup_callback(struct rbd_obj_request *obj_request)
2607 struct rbd_img_request *img_request;
2608 struct rbd_device *rbd_dev;
2609 struct page **pages;
2612 dout("%s: obj %p\n", __func__, obj_request);
2614 rbd_assert(obj_request->type == OBJ_REQUEST_BIO ||
2615 obj_request->type == OBJ_REQUEST_NODATA);
2616 rbd_assert(obj_request_img_data_test(obj_request));
2617 img_request = obj_request->img_request;
2618 rbd_assert(img_request);
2620 rbd_dev = img_request->rbd_dev;
2621 rbd_assert(rbd_dev);
2623 pages = obj_request->copyup_pages;
2624 rbd_assert(pages != NULL);
2625 obj_request->copyup_pages = NULL;
2626 page_count = obj_request->copyup_page_count;
2627 rbd_assert(page_count);
2628 obj_request->copyup_page_count = 0;
2629 ceph_release_page_vector(pages, page_count);
2632 * We want the transfer count to reflect the size of the
2633 * original write request. There is no such thing as a
2634 * successful short write, so if the request was successful
2635 * we can just set it to the originally-requested length.
2637 if (!obj_request->result)
2638 obj_request->xferred = obj_request->length;
2640 obj_request_done_set(obj_request);
2644 rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
2646 struct rbd_obj_request *orig_request;
2647 struct ceph_osd_request *osd_req;
2648 struct rbd_device *rbd_dev;
2649 struct page **pages;
2650 enum obj_operation_type op_type;
2655 rbd_assert(img_request_child_test(img_request));
2657 /* First get what we need from the image request */
2659 pages = img_request->copyup_pages;
2660 rbd_assert(pages != NULL);
2661 img_request->copyup_pages = NULL;
2662 page_count = img_request->copyup_page_count;
2663 rbd_assert(page_count);
2664 img_request->copyup_page_count = 0;
2666 orig_request = img_request->obj_request;
2667 rbd_assert(orig_request != NULL);
2668 rbd_assert(obj_request_type_valid(orig_request->type));
2669 img_result = img_request->result;
2670 parent_length = img_request->length;
2671 rbd_assert(img_result || parent_length == img_request->xferred);
2672 rbd_img_request_put(img_request);
2674 rbd_assert(orig_request->img_request);
2675 rbd_dev = orig_request->img_request->rbd_dev;
2676 rbd_assert(rbd_dev);
2679 * If the overlap has become 0 (most likely because the
2680 * image has been flattened) we need to free the pages
2681 * and re-submit the original write request.
2683 if (!rbd_dev->parent_overlap) {
2684 ceph_release_page_vector(pages, page_count);
2685 rbd_obj_request_submit(orig_request);
2693 * The original osd request is of no use to us any more.
2694 * We need a new one that can hold the three ops in a copyup
2695 * request. Allocate the new copyup osd request for the
2696 * original request, and release the old one.
2698 img_result = -ENOMEM;
2699 osd_req = rbd_osd_req_create_copyup(orig_request);
2702 rbd_osd_req_destroy(orig_request->osd_req);
2703 orig_request->osd_req = osd_req;
2704 orig_request->copyup_pages = pages;
2705 orig_request->copyup_page_count = page_count;
2707 /* Initialize the copyup op */
2709 osd_req_op_cls_init(osd_req, 0, CEPH_OSD_OP_CALL, "rbd", "copyup");
2710 osd_req_op_cls_request_data_pages(osd_req, 0, pages, parent_length, 0,
2713 /* Add the other op(s) */
2715 op_type = rbd_img_request_op_type(orig_request->img_request);
2716 rbd_img_obj_request_fill(orig_request, osd_req, op_type, 1);
2718 /* All set, send it off. */
2720 rbd_obj_request_submit(orig_request);
2724 ceph_release_page_vector(pages, page_count);
2725 orig_request->result = img_result;
2726 orig_request->xferred = 0;
2727 rbd_img_request_get(orig_request->img_request);
2728 obj_request_done_set(orig_request);
2729 rbd_obj_request_complete(orig_request);
2733 * Read from the parent image the range of data that covers the
2734 * entire target of the given object request. This is used for
2735 * satisfying a layered image write request when the target of an
2736 * object request from the image request does not exist.
2738 * A page array big enough to hold the returned data is allocated
2739 * and supplied to rbd_img_request_fill() as the "data descriptor."
2740 * When the read completes, this page array will be transferred to
2741 * the original object request for the copyup operation.
2743 * If an error occurs, it is recorded as the result of the original
2744 * object request in rbd_img_obj_parent_read_full_callback().
2746 static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
2748 struct rbd_device *rbd_dev = obj_request->img_request->rbd_dev;
2749 struct rbd_img_request *parent_request = NULL;
2752 struct page **pages = NULL;
2756 rbd_assert(rbd_dev->parent != NULL);
2759 * Determine the byte range covered by the object in the
2760 * child image to which the original request was to be sent.
2762 img_offset = obj_request->img_offset - obj_request->offset;
2763 length = (u64)1 << rbd_dev->header.obj_order;
2766 * There is no defined parent data beyond the parent
2767 * overlap, so limit what we read at that boundary if
2770 if (img_offset + length > rbd_dev->parent_overlap) {
2771 rbd_assert(img_offset < rbd_dev->parent_overlap);
2772 length = rbd_dev->parent_overlap - img_offset;
2776 * Allocate a page array big enough to receive the data read from the parent.
2779 page_count = (u32)calc_pages_for(0, length);
2780 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2781 if (IS_ERR(pages)) {
2782 result = PTR_ERR(pages);
2788 parent_request = rbd_parent_request_create(obj_request,
2789 img_offset, length);
2790 if (!parent_request)
2793 result = rbd_img_request_fill(parent_request, OBJ_REQUEST_PAGES, pages);
2797 parent_request->copyup_pages = pages;
2798 parent_request->copyup_page_count = page_count;
2799 parent_request->callback = rbd_img_obj_parent_read_full_callback;
2801 result = rbd_img_request_submit(parent_request);
2805 parent_request->copyup_pages = NULL;
2806 parent_request->copyup_page_count = 0;
2807 parent_request->obj_request = NULL;
2808 rbd_obj_request_put(obj_request);
2811 ceph_release_page_vector(pages, page_count);
2813 rbd_img_request_put(parent_request);
2817 static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request)
2819 struct rbd_obj_request *orig_request;
2820 struct rbd_device *rbd_dev;
2823 rbd_assert(!obj_request_img_data_test(obj_request));
2826 * All we need from the object request is the original
2827 * request and the result of the STAT op. Grab those, then
2828 * we're done with the request.
2830 orig_request = obj_request->obj_request;
2831 obj_request->obj_request = NULL;
2832 rbd_obj_request_put(orig_request);
2833 rbd_assert(orig_request);
2834 rbd_assert(orig_request->img_request);
2836 result = obj_request->result;
2837 obj_request->result = 0;
2839 dout("%s: obj %p for obj %p result %d %llu/%llu\n", __func__,
2840 obj_request, orig_request, result,
2841 obj_request->xferred, obj_request->length);
2842 rbd_obj_request_put(obj_request);
2845 * If the overlap has become 0 (most likely because the
2846 * image has been flattened) we need to re-submit the original request.
2849 rbd_dev = orig_request->img_request->rbd_dev;
2850 if (!rbd_dev->parent_overlap) {
2851 rbd_obj_request_submit(orig_request);
2856 * Our only purpose here is to determine whether the object
2857 * exists, and we don't want to treat the non-existence as
2858 * an error. If something else comes back, transfer the
2859 * error to the original request and complete it now.
2862 obj_request_existence_set(orig_request, true);
2863 } else if (result == -ENOENT) {
2864 obj_request_existence_set(orig_request, false);
2866 goto fail_orig_request;
2870 * Resubmit the original request now that we have recorded
2871 * whether the target object exists.
2873 result = rbd_img_obj_request_submit(orig_request);
2875 goto fail_orig_request;
2880 orig_request->result = result;
2881 orig_request->xferred = 0;
2882 rbd_img_request_get(orig_request->img_request);
2883 obj_request_done_set(orig_request);
2884 rbd_obj_request_complete(orig_request);
2887 static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request)
2889 struct rbd_device *rbd_dev = obj_request->img_request->rbd_dev;
2890 struct rbd_obj_request *stat_request;
2891 struct page **pages;
2896 stat_request = rbd_obj_request_create(obj_request->object_name, 0, 0,
2901 stat_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
2903 if (!stat_request->osd_req) {
2905 goto fail_stat_request;
2909 * The response data for a STAT call consists of:
 *     le64 length;
 *     struct {
 *         le32 tv_sec;
 *         le32 tv_nsec;
 *     } mtime;
2916 size = sizeof (__le64) + sizeof (__le32) + sizeof (__le32);
2917 page_count = (u32)calc_pages_for(0, size);
2918 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2919 if (IS_ERR(pages)) {
2920 ret = PTR_ERR(pages);
2921 goto fail_stat_request;
2924 osd_req_op_init(stat_request->osd_req, 0, CEPH_OSD_OP_STAT, 0);
2925 osd_req_op_raw_data_in_pages(stat_request->osd_req, 0, pages, size, 0,
2928 rbd_obj_request_get(obj_request);
2929 stat_request->obj_request = obj_request;
2930 stat_request->pages = pages;
2931 stat_request->page_count = page_count;
2932 stat_request->callback = rbd_img_obj_exists_callback;
2934 rbd_obj_request_submit(stat_request);
2938 rbd_obj_request_put(stat_request);
2942 static bool img_obj_request_simple(struct rbd_obj_request *obj_request)
2944 struct rbd_img_request *img_request = obj_request->img_request;
2945 struct rbd_device *rbd_dev = img_request->rbd_dev;
2948 if (!img_request_write_test(img_request) &&
2949 !img_request_discard_test(img_request))
2952 /* Non-layered writes */
2953 if (!img_request_layered_test(img_request))
2957 * Layered writes outside of the parent overlap range don't
2958 * share any data with the parent.
2960 if (!obj_request_overlaps_parent(obj_request))
2964 * Entire-object layered writes - we will overwrite whatever
2965 * parent data there is anyway.
2967 if (!obj_request->offset &&
2968 obj_request->length == rbd_obj_bytes(&rbd_dev->header))
2972 * If the object is known to already exist, its parent data has
2973 * already been copied.
2975 if (obj_request_known_test(obj_request) &&
2976 obj_request_exists_test(obj_request))
2982 static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request)
2984 rbd_assert(obj_request_img_data_test(obj_request));
2985 rbd_assert(obj_request_type_valid(obj_request->type));
2986 rbd_assert(obj_request->img_request);
2988 if (img_obj_request_simple(obj_request)) {
2989 rbd_obj_request_submit(obj_request);
2994 * It's a layered write. The target object might exist but
2995 * we may not know that yet. If we know it doesn't exist,
2996 * start by reading the data for the full target object from
2997 * the parent so we can use it for a copyup to the target.
2999 if (obj_request_known_test(obj_request))
3000 return rbd_img_obj_parent_read_full(obj_request);
3002 /* We don't know whether the target exists. Go find out. */
3004 return rbd_img_obj_exists_submit(obj_request);
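/*
 * Putting the pieces above together, a layered write proceeds as a
 * small state machine: rbd_img_obj_exists_submit() issues a STAT for
 * the target object; rbd_img_obj_exists_callback() records whether it
 * exists and resubmits.  If the object is known not to exist,
 * rbd_img_obj_parent_read_full() reads the covered range from the
 * parent, and its callback turns the result into a copyup osd
 * request: a "copyup" class method call carrying the parent data,
 * followed by the original write or discard ops.
 */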
3007 static int rbd_img_request_submit(struct rbd_img_request *img_request)
3009 struct rbd_obj_request *obj_request;
3010 struct rbd_obj_request *next_obj_request;
3013 dout("%s: img %p\n", __func__, img_request);
3015 rbd_img_request_get(img_request);
3016 for_each_obj_request_safe(img_request, obj_request, next_obj_request) {
3017 ret = rbd_img_obj_request_submit(obj_request);
3023 rbd_img_request_put(img_request);
3027 static void rbd_img_parent_read_callback(struct rbd_img_request *img_request)
3029 struct rbd_obj_request *obj_request;
3030 struct rbd_device *rbd_dev;
3035 rbd_assert(img_request_child_test(img_request));
3037 /* First get what we need from the image request and release it */
3039 obj_request = img_request->obj_request;
3040 img_xferred = img_request->xferred;
3041 img_result = img_request->result;
3042 rbd_img_request_put(img_request);
3045 * If the overlap has become 0 (most likely because the
3046 * image has been flattened) we need to re-submit the original request.
3049 rbd_assert(obj_request);
3050 rbd_assert(obj_request->img_request);
3051 rbd_dev = obj_request->img_request->rbd_dev;
3052 if (!rbd_dev->parent_overlap) {
3053 rbd_obj_request_submit(obj_request);
3057 obj_request->result = img_result;
3058 if (obj_request->result)
3062 * We need to zero anything beyond the parent overlap
3063 * boundary. Since rbd_img_obj_request_read_callback()
3064 * will zero anything beyond the end of a short read, an
3065 * easy way to do this is to pretend the data from the
3066 * parent came up short--ending at the overlap boundary.
3068 rbd_assert(obj_request->img_offset < U64_MAX - obj_request->length);
3069 obj_end = obj_request->img_offset + obj_request->length;
3070 if (obj_end > rbd_dev->parent_overlap) {
3073 if (obj_request->img_offset < rbd_dev->parent_overlap)
3074 xferred = rbd_dev->parent_overlap -
3075 obj_request->img_offset;
3077 obj_request->xferred = min(img_xferred, xferred);
3079 obj_request->xferred = img_xferred;
3082 rbd_img_obj_request_read_callback(obj_request);
3083 rbd_obj_request_complete(obj_request);
3086 static void rbd_img_parent_read(struct rbd_obj_request *obj_request)
3088 struct rbd_img_request *img_request;
3091 rbd_assert(obj_request_img_data_test(obj_request));
3092 rbd_assert(obj_request->img_request != NULL);
3093 rbd_assert(obj_request->result == (s32) -ENOENT);
3094 rbd_assert(obj_request_type_valid(obj_request->type));
3096 /* rbd_read_finish(obj_request, obj_request->length); */
3097 img_request = rbd_parent_request_create(obj_request,
3098 obj_request->img_offset,
3099 obj_request->length);
3104 if (obj_request->type == OBJ_REQUEST_BIO)
3105 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
3106 obj_request->bio_list);
3108 result = rbd_img_request_fill(img_request, OBJ_REQUEST_PAGES,
3109 obj_request->pages);
3113 img_request->callback = rbd_img_parent_read_callback;
3114 result = rbd_img_request_submit(img_request);
3121 rbd_img_request_put(img_request);
3122 obj_request->result = result;
3123 obj_request->xferred = 0;
3124 obj_request_done_set(obj_request);
3127 static const struct rbd_client_id rbd_empty_cid;
3129 static bool rbd_cid_equal(const struct rbd_client_id *lhs,
3130 const struct rbd_client_id *rhs)
3132 return lhs->gid == rhs->gid && lhs->handle == rhs->handle;
3135 static struct rbd_client_id rbd_get_cid(struct rbd_device *rbd_dev)
3137 struct rbd_client_id cid;
3139 mutex_lock(&rbd_dev->watch_mutex);
3140 cid.gid = ceph_client_gid(rbd_dev->rbd_client->client);
3141 cid.handle = rbd_dev->watch_cookie;
3142 mutex_unlock(&rbd_dev->watch_mutex);
3147 * lock_rwsem must be held for write
3149 static void rbd_set_owner_cid(struct rbd_device *rbd_dev,
3150 const struct rbd_client_id *cid)
3152 dout("%s rbd_dev %p %llu-%llu -> %llu-%llu\n", __func__, rbd_dev,
3153 rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle,
3154 cid->gid, cid->handle);
3155 rbd_dev->owner_cid = *cid; /* struct */
3158 static void format_lock_cookie(struct rbd_device *rbd_dev, char *buf)
3160 mutex_lock(&rbd_dev->watch_mutex);
3161 sprintf(buf, "%s %llu", RBD_LOCK_COOKIE_PREFIX, rbd_dev->watch_cookie);
3162 mutex_unlock(&rbd_dev->watch_mutex);
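/*
 * Assuming RBD_LOCK_COOKIE_PREFIX is "auto" (its definition is not
 * shown in this excerpt), the cookie formatted above looks like
 * "auto 94872188691072", tying the exclusive lock to this client's
 * watch on the header object.
 */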
3166 * lock_rwsem must be held for write
3168 static int rbd_lock(struct rbd_device *rbd_dev)
3170 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3171 struct rbd_client_id cid = rbd_get_cid(rbd_dev);
3175 WARN_ON(__rbd_is_lock_owner(rbd_dev));
3177 format_lock_cookie(rbd_dev, cookie);
3178 ret = ceph_cls_lock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
3179 RBD_LOCK_NAME, CEPH_CLS_LOCK_EXCLUSIVE, cookie,
3180 RBD_LOCK_TAG, "", 0);
3184 rbd_dev->lock_state = RBD_LOCK_STATE_LOCKED;
3185 rbd_set_owner_cid(rbd_dev, &cid);
3186 queue_work(rbd_dev->task_wq, &rbd_dev->acquired_lock_work);
3191 * lock_rwsem must be held for write
3193 static int rbd_unlock(struct rbd_device *rbd_dev)
3195 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3199 WARN_ON(!__rbd_is_lock_owner(rbd_dev));
3201 rbd_dev->lock_state = RBD_LOCK_STATE_UNLOCKED;
3203 format_lock_cookie(rbd_dev, cookie);
3204 ret = ceph_cls_unlock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
3205 RBD_LOCK_NAME, cookie);
3206 if (ret && ret != -ENOENT) {
3207 rbd_warn(rbd_dev, "cls_unlock failed: %d", ret);
3211 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
3212 queue_work(rbd_dev->task_wq, &rbd_dev->released_lock_work);
3216 static int __rbd_notify_op_lock(struct rbd_device *rbd_dev,
3217 enum rbd_notify_op notify_op,
3218 struct page ***preply_pages,
3221 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3222 struct rbd_client_id cid = rbd_get_cid(rbd_dev);
3223 int buf_size = 4 + 8 + 8 + CEPH_ENCODING_START_BLK_LEN;
3227 dout("%s rbd_dev %p notify_op %d\n", __func__, rbd_dev, notify_op);
3229 /* encode *LockPayload NotifyMessage (op + ClientId) */
3230 ceph_start_encoding(&p, 2, 1, buf_size - CEPH_ENCODING_START_BLK_LEN);
3231 ceph_encode_32(&p, notify_op);
3232 ceph_encode_64(&p, cid.gid);
3233 ceph_encode_64(&p, cid.handle);
3235 return ceph_osdc_notify(osdc, &rbd_dev->header_oid,
3236 &rbd_dev->header_oloc, buf, buf_size,
3237 RBD_NOTIFY_TIMEOUT, preply_pages, preply_len);
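/*
 * The notify buffer encoded above is laid out as:
 *
 *	encoding header (struct_v 2, compat 1, length)
 *	le32 notify_op
 *	le64 gid, le64 handle	(the ClientId)
 *
 * which accounts for the buf_size of 4 + 8 + 8 bytes plus
 * CEPH_ENCODING_START_BLK_LEN.
 */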
3240 static void rbd_notify_op_lock(struct rbd_device *rbd_dev,
3241 enum rbd_notify_op notify_op)
3243 struct page **reply_pages;
3246 __rbd_notify_op_lock(rbd_dev, notify_op, &reply_pages, &reply_len);
3247 ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len));
3250 static void rbd_notify_acquired_lock(struct work_struct *work)
3252 struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
3253 acquired_lock_work);
3255 rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_ACQUIRED_LOCK);
3258 static void rbd_notify_released_lock(struct work_struct *work)
3260 struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
3261 released_lock_work);
3263 rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_RELEASED_LOCK);
3266 static int rbd_request_lock(struct rbd_device *rbd_dev)
3268 struct page **reply_pages;
3270 bool lock_owner_responded = false;
3273 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3275 ret = __rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_REQUEST_LOCK,
3276 &reply_pages, &reply_len);
3277 if (ret && ret != -ETIMEDOUT) {
3278 rbd_warn(rbd_dev, "failed to request lock: %d", ret);
3282 if (reply_len > 0 && reply_len <= PAGE_SIZE) {
3283 void *p = page_address(reply_pages[0]);
3284 void *const end = p + reply_len;
3287 ceph_decode_32_safe(&p, end, n, e_inval); /* num_acks */
3292 ceph_decode_need(&p, end, 8 + 8, e_inval);
3293 p += 8 + 8; /* skip gid and cookie */
3295 ceph_decode_32_safe(&p, end, len, e_inval);
3299 if (lock_owner_responded) {
3301 "duplicate lock owners detected");
3306 lock_owner_responded = true;
3307 ret = ceph_start_decoding(&p, end, 1, "ResponseMessage",
3311 "failed to decode ResponseMessage: %d",
3316 ret = ceph_decode_32(&p);
3320 if (!lock_owner_responded) {
3321 rbd_warn(rbd_dev, "no lock owners detected");
3326 ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len));
3334 static void wake_requests(struct rbd_device *rbd_dev, bool wake_all)
3336 dout("%s rbd_dev %p wake_all %d\n", __func__, rbd_dev, wake_all);
3338 cancel_delayed_work(&rbd_dev->lock_dwork);
3340 wake_up_all(&rbd_dev->lock_waitq);
3342 wake_up(&rbd_dev->lock_waitq);
3345 static int get_lock_owner_info(struct rbd_device *rbd_dev,
3346 struct ceph_locker **lockers, u32 *num_lockers)
3348 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3353 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3355 ret = ceph_cls_lock_info(osdc, &rbd_dev->header_oid,
3356 &rbd_dev->header_oloc, RBD_LOCK_NAME,
3357 &lock_type, &lock_tag, lockers, num_lockers);
3361 if (*num_lockers == 0) {
3362 dout("%s rbd_dev %p no lockers detected\n", __func__, rbd_dev);
3366 if (strcmp(lock_tag, RBD_LOCK_TAG)) {
3367 rbd_warn(rbd_dev, "locked by external mechanism, tag %s",
3373 if (lock_type == CEPH_CLS_LOCK_SHARED) {
3374 rbd_warn(rbd_dev, "shared lock type detected");
3379 if (strncmp((*lockers)[0].id.cookie, RBD_LOCK_COOKIE_PREFIX,
3380 strlen(RBD_LOCK_COOKIE_PREFIX))) {
3381 rbd_warn(rbd_dev, "locked by external mechanism, cookie %s",
3382 (*lockers)[0].id.cookie);
3392 static int find_watcher(struct rbd_device *rbd_dev,
3393 const struct ceph_locker *locker)
3395 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3396 struct ceph_watch_item *watchers;
3402 ret = ceph_osdc_list_watchers(osdc, &rbd_dev->header_oid,
3403 &rbd_dev->header_oloc, &watchers,
3408 sscanf(locker->id.cookie, RBD_LOCK_COOKIE_PREFIX " %llu", &cookie);
3409 for (i = 0; i < num_watchers; i++) {
3410 if (!memcmp(&watchers[i].addr, &locker->info.addr,
3411 sizeof(locker->info.addr)) &&
3412 watchers[i].cookie == cookie) {
3413 struct rbd_client_id cid = {
3414 .gid = le64_to_cpu(watchers[i].name.num),
3418 dout("%s rbd_dev %p found cid %llu-%llu\n", __func__,
3419 rbd_dev, cid.gid, cid.handle);
3420 rbd_set_owner_cid(rbd_dev, &cid);
3426 dout("%s rbd_dev %p no watchers\n", __func__, rbd_dev);
3434 * lock_rwsem must be held for write
3436 static int rbd_try_lock(struct rbd_device *rbd_dev)
3438 struct ceph_client *client = rbd_dev->rbd_client->client;
3439 struct ceph_locker *lockers;
3444 ret = rbd_lock(rbd_dev);
3448 /* determine if the current lock holder is still alive */
3449 ret = get_lock_owner_info(rbd_dev, &lockers, &num_lockers);
3453 if (num_lockers == 0)
3456 ret = find_watcher(rbd_dev, lockers);
3459 ret = 0; /* have to request lock */
3463 rbd_warn(rbd_dev, "%s%llu seems dead, breaking lock",
3464 ENTITY_NAME(lockers[0].id.name));
3466 ret = ceph_monc_blacklist_add(&client->monc,
3467 &lockers[0].info.addr);
3469 rbd_warn(rbd_dev, "blacklist of %s%llu failed: %d",
3470 ENTITY_NAME(lockers[0].id.name), ret);
3474 ret = ceph_cls_break_lock(&client->osdc, &rbd_dev->header_oid,
3475 &rbd_dev->header_oloc, RBD_LOCK_NAME,
3476 lockers[0].id.cookie,
3477 &lockers[0].id.name);
3478 if (ret && ret != -ENOENT)
3482 ceph_free_lockers(lockers, num_lockers);
3486 ceph_free_lockers(lockers, num_lockers);
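/*
 * To summarize rbd_try_lock(): when the lock is already held by
 * another client, look up the holder and check for a matching watch.
 * A holder with a live watch means we must fall back to requesting
 * the lock (ret == 0 above); a holder with no watch is presumed dead,
 * so it is blacklisted and its lock broken before we try again.
 */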
3491 * ret is set only if lock_state is RBD_LOCK_STATE_UNLOCKED
3493 static enum rbd_lock_state rbd_try_acquire_lock(struct rbd_device *rbd_dev,
3496 enum rbd_lock_state lock_state;
3498 down_read(&rbd_dev->lock_rwsem);
3499 dout("%s rbd_dev %p read lock_state %d\n", __func__, rbd_dev,
3500 rbd_dev->lock_state);
3501 if (__rbd_is_lock_owner(rbd_dev)) {
3502 lock_state = rbd_dev->lock_state;
3503 up_read(&rbd_dev->lock_rwsem);
3507 up_read(&rbd_dev->lock_rwsem);
3508 down_write(&rbd_dev->lock_rwsem);
3509 dout("%s rbd_dev %p write lock_state %d\n", __func__, rbd_dev,
3510 rbd_dev->lock_state);
3511 if (!__rbd_is_lock_owner(rbd_dev)) {
3512 *pret = rbd_try_lock(rbd_dev);
3514 rbd_warn(rbd_dev, "failed to acquire lock: %d", *pret);
3517 lock_state = rbd_dev->lock_state;
3518 up_write(&rbd_dev->lock_rwsem);
3522 static void rbd_acquire_lock(struct work_struct *work)
3524 struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
3525 struct rbd_device, lock_dwork);
3526 enum rbd_lock_state lock_state;
3529 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3531 lock_state = rbd_try_acquire_lock(rbd_dev, &ret);
3532 if (lock_state != RBD_LOCK_STATE_UNLOCKED || ret == -EBLACKLISTED) {
3533 if (lock_state == RBD_LOCK_STATE_LOCKED)
3534 wake_requests(rbd_dev, true);
3535 dout("%s rbd_dev %p lock_state %d ret %d - done\n", __func__,
3536 rbd_dev, lock_state, ret);
3540 ret = rbd_request_lock(rbd_dev);
3541 if (ret == -ETIMEDOUT) {
3542 goto again; /* treat this as a dead client */
3543 } else if (ret < 0) {
3544 rbd_warn(rbd_dev, "error requesting lock: %d", ret);
3545 mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork,
3549 * lock owner acked, but resend if we don't see them release the lock
3552 dout("%s rbd_dev %p requeueing lock_dwork\n", __func__,
3554 mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork,
3555 msecs_to_jiffies(2 * RBD_NOTIFY_TIMEOUT * MSEC_PER_SEC));
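/*
 * The acquire worker above is self-rearming: success or a blacklisted
 * client ends it; a notify timeout is treated as a dead owner and
 * retried immediately; an acked request is retried after twice
 * RBD_NOTIFY_TIMEOUT in case the owner never sends a released-lock
 * notification; any other error is retried after a short delay.
 */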
3560 * lock_rwsem must be held for write
3562 static bool rbd_release_lock(struct rbd_device *rbd_dev)
3564 dout("%s rbd_dev %p read lock_state %d\n", __func__, rbd_dev,
3565 rbd_dev->lock_state);
3566 if (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED)
3569 rbd_dev->lock_state = RBD_LOCK_STATE_RELEASING;
3570 downgrade_write(&rbd_dev->lock_rwsem);
3572 * Ensure that all in-flight IO is flushed.
3574 * FIXME: ceph_osdc_sync() flushes the entire OSD client, which
3575 * may be shared with other devices.
3577 ceph_osdc_sync(&rbd_dev->rbd_client->client->osdc);
3578 up_read(&rbd_dev->lock_rwsem);
3580 down_write(&rbd_dev->lock_rwsem);
3581 dout("%s rbd_dev %p write lock_state %d\n", __func__, rbd_dev,
3582 rbd_dev->lock_state);
3583 if (rbd_dev->lock_state != RBD_LOCK_STATE_RELEASING)
3586 if (!rbd_unlock(rbd_dev))
3588 * Give others a chance to grab the lock - we would re-acquire
3589 * almost immediately if we got new IO during ceph_osdc_sync()
3590 * otherwise. We need to ack our own notifications, so this
3591 * lock_dwork will be requeued from rbd_wait_state_locked()
3592 * after wake_requests() in rbd_handle_released_lock().
3594 cancel_delayed_work(&rbd_dev->lock_dwork);
3599 static void rbd_release_lock_work(struct work_struct *work)
3601 struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
3604 down_write(&rbd_dev->lock_rwsem);
3605 rbd_release_lock(rbd_dev);
3606 up_write(&rbd_dev->lock_rwsem);
3609 static void rbd_handle_acquired_lock(struct rbd_device *rbd_dev, u8 struct_v,
3612 struct rbd_client_id cid = { 0 };
3614 if (struct_v >= 2) {
3615 cid.gid = ceph_decode_64(p);
3616 cid.handle = ceph_decode_64(p);
3619 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
3621 if (!rbd_cid_equal(&cid, &rbd_empty_cid)) {
3622 down_write(&rbd_dev->lock_rwsem);
3623 if (rbd_cid_equal(&cid, &rbd_dev->owner_cid)) {
3625 * we already know that the remote client is the owner
3628 up_write(&rbd_dev->lock_rwsem);
3632 rbd_set_owner_cid(rbd_dev, &cid);
3633 downgrade_write(&rbd_dev->lock_rwsem);
3635 down_read(&rbd_dev->lock_rwsem);
3638 if (!__rbd_is_lock_owner(rbd_dev))
3639 wake_requests(rbd_dev, false);
3640 up_read(&rbd_dev->lock_rwsem);
3643 static void rbd_handle_released_lock(struct rbd_device *rbd_dev, u8 struct_v,
3646 struct rbd_client_id cid = { 0 };
3648 if (struct_v >= 2) {
3649 cid.gid = ceph_decode_64(p);
3650 cid.handle = ceph_decode_64(p);
3653 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
3655 if (!rbd_cid_equal(&cid, &rbd_empty_cid)) {
3656 down_write(&rbd_dev->lock_rwsem);
3657 if (!rbd_cid_equal(&cid, &rbd_dev->owner_cid)) {
3658 dout("%s rbd_dev %p unexpected owner, cid %llu-%llu != owner_cid %llu-%llu\n",
3659 __func__, rbd_dev, cid.gid, cid.handle,
3660 rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle);
3661 up_write(&rbd_dev->lock_rwsem);
3665 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
3666 downgrade_write(&rbd_dev->lock_rwsem);
3668 down_read(&rbd_dev->lock_rwsem);
3671 if (!__rbd_is_lock_owner(rbd_dev))
3672 wake_requests(rbd_dev, false);
3673 up_read(&rbd_dev->lock_rwsem);
3676 static bool rbd_handle_request_lock(struct rbd_device *rbd_dev, u8 struct_v,
3679 struct rbd_client_id my_cid = rbd_get_cid(rbd_dev);
3680 struct rbd_client_id cid = { 0 };
3683 if (struct_v >= 2) {
3684 cid.gid = ceph_decode_64(p);
3685 cid.handle = ceph_decode_64(p);
3688 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
3690 if (rbd_cid_equal(&cid, &my_cid))
3693 down_read(&rbd_dev->lock_rwsem);
3694 need_to_send = __rbd_is_lock_owner(rbd_dev);
3695 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED) {
3696 if (!rbd_cid_equal(&rbd_dev->owner_cid, &rbd_empty_cid)) {
3697 dout("%s rbd_dev %p queueing unlock_work\n", __func__,
3699 queue_work(rbd_dev->task_wq, &rbd_dev->unlock_work);
3702 up_read(&rbd_dev->lock_rwsem);
3703 return need_to_send;
3706 static void __rbd_acknowledge_notify(struct rbd_device *rbd_dev,
3707 u64 notify_id, u64 cookie, s32 *result)
3709 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3710 int buf_size = 4 + CEPH_ENCODING_START_BLK_LEN;
3717 /* encode ResponseMessage */
3718 ceph_start_encoding(&p, 1, 1,
3719 buf_size - CEPH_ENCODING_START_BLK_LEN);
3720 ceph_encode_32(&p, *result);
3725 ret = ceph_osdc_notify_ack(osdc, &rbd_dev->header_oid,
3726 &rbd_dev->header_oloc, notify_id, cookie,
3729 rbd_warn(rbd_dev, "acknowledge_notify failed: %d", ret);
3732 static void rbd_acknowledge_notify(struct rbd_device *rbd_dev, u64 notify_id,
3735 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3736 __rbd_acknowledge_notify(rbd_dev, notify_id, cookie, NULL);
3739 static void rbd_acknowledge_notify_result(struct rbd_device *rbd_dev,
3740 u64 notify_id, u64 cookie, s32 result)
3742 dout("%s rbd_dev %p result %d\n", __func__, rbd_dev, result);
3743 __rbd_acknowledge_notify(rbd_dev, notify_id, cookie, &result);
3746 static void rbd_watch_cb(void *arg, u64 notify_id, u64 cookie,
3747 u64 notifier_id, void *data, size_t data_len)
3749 struct rbd_device *rbd_dev = arg;
3751 void *const end = p + data_len;
3757 dout("%s rbd_dev %p cookie %llu notify_id %llu data_len %zu\n",
3758 __func__, rbd_dev, cookie, notify_id, data_len);
3760 ret = ceph_start_decoding(&p, end, 1, "NotifyMessage",
3763 rbd_warn(rbd_dev, "failed to decode NotifyMessage: %d",
3768 notify_op = ceph_decode_32(&p);
3770 /* legacy notification for header updates */
3771 notify_op = RBD_NOTIFY_OP_HEADER_UPDATE;
3775 dout("%s rbd_dev %p notify_op %u\n", __func__, rbd_dev, notify_op);
3776 switch (notify_op) {
3777 case RBD_NOTIFY_OP_ACQUIRED_LOCK:
3778 rbd_handle_acquired_lock(rbd_dev, struct_v, &p);
3779 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
3781 case RBD_NOTIFY_OP_RELEASED_LOCK:
3782 rbd_handle_released_lock(rbd_dev, struct_v, &p);
3783 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
3785 case RBD_NOTIFY_OP_REQUEST_LOCK:
3786 if (rbd_handle_request_lock(rbd_dev, struct_v, &p))
3788 * send ResponseMessage(0) back so the client
3789 * can detect a missing owner
3791 rbd_acknowledge_notify_result(rbd_dev, notify_id,
3794 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
3796 case RBD_NOTIFY_OP_HEADER_UPDATE:
3797 ret = rbd_dev_refresh(rbd_dev);
3799 rbd_warn(rbd_dev, "refresh failed: %d", ret);
3801 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
3804 if (rbd_is_lock_owner(rbd_dev))
3805 rbd_acknowledge_notify_result(rbd_dev, notify_id,
3806 cookie, -EOPNOTSUPP);
3808 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
3813 static void __rbd_unregister_watch(struct rbd_device *rbd_dev);
3815 static void rbd_watch_errcb(void *arg, u64 cookie, int err)
3817 struct rbd_device *rbd_dev = arg;
3819 rbd_warn(rbd_dev, "encountered watch error: %d", err);
3821 down_write(&rbd_dev->lock_rwsem);
3822 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
3823 up_write(&rbd_dev->lock_rwsem);
3825 mutex_lock(&rbd_dev->watch_mutex);
3826 if (rbd_dev->watch_state == RBD_WATCH_STATE_REGISTERED) {
3827 __rbd_unregister_watch(rbd_dev);
3828 rbd_dev->watch_state = RBD_WATCH_STATE_ERROR;
3830 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->watch_dwork, 0);
3832 mutex_unlock(&rbd_dev->watch_mutex);
3836 * watch_mutex must be locked
3838 static int __rbd_register_watch(struct rbd_device *rbd_dev)
3840 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3841 struct ceph_osd_linger_request *handle;
3843 rbd_assert(!rbd_dev->watch_handle);
3844 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3846 handle = ceph_osdc_watch(osdc, &rbd_dev->header_oid,
3847 &rbd_dev->header_oloc, rbd_watch_cb,
3848 rbd_watch_errcb, rbd_dev);
3850 return PTR_ERR(handle);
3852 rbd_dev->watch_handle = handle;
3857 * watch_mutex must be locked
3859 static void __rbd_unregister_watch(struct rbd_device *rbd_dev)
3861 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3864 rbd_assert(rbd_dev->watch_handle);
3865 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3867 ret = ceph_osdc_unwatch(osdc, rbd_dev->watch_handle);
3869 rbd_warn(rbd_dev, "failed to unwatch: %d", ret);
3871 rbd_dev->watch_handle = NULL;
3874 static int rbd_register_watch(struct rbd_device *rbd_dev)
3878 mutex_lock(&rbd_dev->watch_mutex);
3879 rbd_assert(rbd_dev->watch_state == RBD_WATCH_STATE_UNREGISTERED);
3880 ret = __rbd_register_watch(rbd_dev);
3884 rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED;
3885 rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id;
3888 mutex_unlock(&rbd_dev->watch_mutex);
3892 static void cancel_tasks_sync(struct rbd_device *rbd_dev)
3894 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3896 cancel_delayed_work_sync(&rbd_dev->watch_dwork);
3897 cancel_work_sync(&rbd_dev->acquired_lock_work);
3898 cancel_work_sync(&rbd_dev->released_lock_work);
3899 cancel_delayed_work_sync(&rbd_dev->lock_dwork);
3900 cancel_work_sync(&rbd_dev->unlock_work);
3903 static void rbd_unregister_watch(struct rbd_device *rbd_dev)
3905 WARN_ON(waitqueue_active(&rbd_dev->lock_waitq));
3906 cancel_tasks_sync(rbd_dev);
3908 mutex_lock(&rbd_dev->watch_mutex);
3909 if (rbd_dev->watch_state == RBD_WATCH_STATE_REGISTERED)
3910 __rbd_unregister_watch(rbd_dev);
3911 rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED;
3912 mutex_unlock(&rbd_dev->watch_mutex);
3914 ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
3917 static void rbd_reregister_watch(struct work_struct *work)
3919 struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
3920 struct rbd_device, watch_dwork);
3921 bool was_lock_owner = false;
3924 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3926 down_write(&rbd_dev->lock_rwsem);
3927 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED)
3928 was_lock_owner = rbd_release_lock(rbd_dev);
3930 mutex_lock(&rbd_dev->watch_mutex);
3931 if (rbd_dev->watch_state != RBD_WATCH_STATE_ERROR)
3934 ret = __rbd_register_watch(rbd_dev);
3936 rbd_warn(rbd_dev, "failed to reregister watch: %d", ret);
3937 if (ret != -EBLACKLISTED)
3938 queue_delayed_work(rbd_dev->task_wq,
3939 &rbd_dev->watch_dwork,
3944 rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED;
3945 rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id;
3946 mutex_unlock(&rbd_dev->watch_mutex);
3948 ret = rbd_dev_refresh(rbd_dev);
3950 rbd_warn(rbd_dev, "reregistration refresh failed: %d", ret);
3952 if (was_lock_owner) {
3953 ret = rbd_try_lock(rbd_dev);
3955 rbd_warn(rbd_dev, "reregistration lock failed: %d",
3959 up_write(&rbd_dev->lock_rwsem);
3960 wake_requests(rbd_dev, true);
3964 mutex_unlock(&rbd_dev->watch_mutex);
3965 up_write(&rbd_dev->lock_rwsem);
3969 * Synchronous osd object method call. Returns the number of bytes
3970 * returned in the outbound buffer, or a negative error code.
3972 static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
3973 const char *object_name,
3974 const char *class_name,
3975 const char *method_name,
3976 const void *outbound,
3977 size_t outbound_size,
3979 size_t inbound_size)
3981 struct rbd_obj_request *obj_request;
3982 struct page **pages;
3987 * Method calls are ultimately read operations. The result
3988 * should be placed into the inbound buffer provided. They
3989 * also supply outbound data--parameters for the object
3990 * method. Currently if this is present it will be a snapshot id.
3993 page_count = (u32)calc_pages_for(0, inbound_size);
3994 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
3996 return PTR_ERR(pages);
3999 obj_request = rbd_obj_request_create(object_name, 0, inbound_size,
4004 obj_request->pages = pages;
4005 obj_request->page_count = page_count;
4007 obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
4009 if (!obj_request->osd_req)
4012 osd_req_op_cls_init(obj_request->osd_req, 0, CEPH_OSD_OP_CALL,
4013 class_name, method_name);
4014 if (outbound_size) {
4015 struct ceph_pagelist *pagelist;
4017 pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
4021 ceph_pagelist_init(pagelist);
4022 ceph_pagelist_append(pagelist, outbound, outbound_size);
4023 osd_req_op_cls_request_data_pagelist(obj_request->osd_req, 0,
4026 osd_req_op_cls_response_data_pages(obj_request->osd_req, 0,
4027 obj_request->pages, inbound_size,
4030 rbd_obj_request_submit(obj_request);
4031 ret = rbd_obj_request_wait(obj_request);
4035 ret = obj_request->result;
4039 rbd_assert(obj_request->xferred < (u64)INT_MAX);
4040 ret = (int)obj_request->xferred;
4041 ceph_copy_from_page_vector(pages, inbound, 0, obj_request->xferred);
4044 rbd_obj_request_put(obj_request);
4046 ceph_release_page_vector(pages, page_count);
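/*
 * A typical caller passes the header object name and an "rbd" class
 * method, then decodes the inbound buffer.  A minimal sketch (the
 * "get_size" method and its reply layout are assumptions, not shown
 * in this excerpt):
 *
 *	__le64 snapid_le = cpu_to_le64(CEPH_NOSNAP);
 *	__le64 size_le;
 *	int ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
 *				      "rbd", "get_size",
 *				      &snapid_le, sizeof(snapid_le),
 *				      &size_le, sizeof(size_le));
 *	if (ret >= (int)sizeof(size_le))
 *		size = le64_to_cpu(size_le);
 */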
4052 * lock_rwsem must be held for read
4054 static void rbd_wait_state_locked(struct rbd_device *rbd_dev)
4060 * Note the use of mod_delayed_work() in rbd_acquire_lock()
4061 * and cancel_delayed_work() in wake_requests().
4063 dout("%s rbd_dev %p queueing lock_dwork\n", __func__, rbd_dev);
4064 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
4065 prepare_to_wait_exclusive(&rbd_dev->lock_waitq, &wait,
4066 TASK_UNINTERRUPTIBLE);
4067 up_read(&rbd_dev->lock_rwsem);
4069 down_read(&rbd_dev->lock_rwsem);
4070 } while (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED);
4071 finish_wait(&rbd_dev->lock_waitq, &wait);
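/*
 * Note the exclusive wait above: each writer queues the lock
 * acquisition worker, then sleeps on lock_waitq with lock_rwsem
 * dropped so the worker and the watch callbacks can take it for
 * write.  The loop re-checks lock_state after every wakeup, since
 * being woken does not guarantee the lock was actually acquired.
 */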
4074 static void rbd_queue_workfn(struct work_struct *work)
4076 struct request *rq = blk_mq_rq_from_pdu(work);
4077 struct rbd_device *rbd_dev = rq->q->queuedata;
4078 struct rbd_img_request *img_request;
4079 struct ceph_snap_context *snapc = NULL;
4080 u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT;
4081 u64 length = blk_rq_bytes(rq);
4082 enum obj_operation_type op_type;
4084 bool must_be_locked;
4087 if (rq->cmd_type != REQ_TYPE_FS) {
4088 dout("%s: non-fs request type %d\n", __func__,
4089 (int) rq->cmd_type);
4094 if (req_op(rq) == REQ_OP_DISCARD)
4095 op_type = OBJ_OP_DISCARD;
4096 else if (req_op(rq) == REQ_OP_WRITE)
4097 op_type = OBJ_OP_WRITE;
4099 op_type = OBJ_OP_READ;
4101 /* Ignore/skip any zero-length requests */
4104 dout("%s: zero-length request\n", __func__);
4109 /* Only reads are allowed to a read-only device */
4111 if (op_type != OBJ_OP_READ) {
4112 if (rbd_dev->mapping.read_only) {
4116 rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP);
4120 * Quit early if the mapped snapshot no longer exists. It's
4121 * still possible the snapshot will have disappeared by the
4122 * time our request arrives at the osd, but there's no sense in
4123 * sending it if we already know.
4125 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
4126 dout("request for non-existent snapshot\n");
4127 rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
4132 if (offset && length > U64_MAX - offset + 1) {
4133 rbd_warn(rbd_dev, "bad request range (%llu~%llu)", offset,
4136 goto err_rq; /* Shouldn't happen */
4139 blk_mq_start_request(rq);
4141 down_read(&rbd_dev->header_rwsem);
4142 mapping_size = rbd_dev->mapping.size;
4143 if (op_type != OBJ_OP_READ) {
4144 snapc = rbd_dev->header.snapc;
4145 ceph_get_snap_context(snapc);
4146 must_be_locked = rbd_is_lock_supported(rbd_dev);
4148 must_be_locked = rbd_dev->opts->lock_on_read &&
4149 rbd_is_lock_supported(rbd_dev);
4151 up_read(&rbd_dev->header_rwsem);
4153 if (offset + length > mapping_size) {
4154 rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)", offset,
4155 length, mapping_size);
4160 if (must_be_locked) {
4161 down_read(&rbd_dev->lock_rwsem);
4162 if (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED)
4163 rbd_wait_state_locked(rbd_dev);
4166 img_request = rbd_img_request_create(rbd_dev, offset, length, op_type,
4172 img_request->rq = rq;
4173 snapc = NULL; /* img_request consumes a ref */
4175 if (op_type == OBJ_OP_DISCARD)
4176 result = rbd_img_request_fill(img_request, OBJ_REQUEST_NODATA,
4179 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
4182 goto err_img_request;
4184 result = rbd_img_request_submit(img_request);
4186 goto err_img_request;
4189 up_read(&rbd_dev->lock_rwsem);
4193 rbd_img_request_put(img_request);
4196 up_read(&rbd_dev->lock_rwsem);
4199 rbd_warn(rbd_dev, "%s %llx at %llx result %d",
4200 obj_op_name(op_type), length, offset, result);
4201 ceph_put_snap_context(snapc);
4203 blk_mq_end_request(rq, result);
4206 static int rbd_queue_rq(struct blk_mq_hw_ctx *hctx,
4207 const struct blk_mq_queue_data *bd)
4209 struct request *rq = bd->rq;
4210 struct work_struct *work = blk_mq_rq_to_pdu(rq);
4212 queue_work(rbd_wq, work);
4213 return BLK_MQ_RQ_QUEUE_OK;
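/*
 * rbd_queue_rq() runs in the blk-mq dispatch path and must not block,
 * so it only queues the request's embedded work_struct (initialized
 * in rbd_init_request() below) on the rbd_wq workqueue; the actual
 * I/O is issued from rbd_queue_workfn().
 */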
4216 static void rbd_free_disk(struct rbd_device *rbd_dev)
4218 struct gendisk *disk = rbd_dev->disk;
4223 rbd_dev->disk = NULL;
4224 if (disk->flags & GENHD_FL_UP) {
4227 blk_cleanup_queue(disk->queue);
4228 blk_mq_free_tag_set(&rbd_dev->tag_set);
4233 static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
4234 const char *object_name,
4235 u64 offset, u64 length, void *buf)
4238 struct rbd_obj_request *obj_request;
4239 struct page **pages = NULL;
4244 page_count = (u32) calc_pages_for(offset, length);
4245 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
4247 return PTR_ERR(pages);
4250 obj_request = rbd_obj_request_create(object_name, offset, length,
4255 obj_request->pages = pages;
4256 obj_request->page_count = page_count;
4258 obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
4260 if (!obj_request->osd_req)
4263 osd_req_op_extent_init(obj_request->osd_req, 0, CEPH_OSD_OP_READ,
4264 offset, length, 0, 0);
4265 osd_req_op_extent_osd_data_pages(obj_request->osd_req, 0,
4267 obj_request->length,
4268 obj_request->offset & ~PAGE_MASK,
4271 rbd_obj_request_submit(obj_request);
4272 ret = rbd_obj_request_wait(obj_request);
4276 ret = obj_request->result;
4280 rbd_assert(obj_request->xferred <= (u64) SIZE_MAX);
4281 size = (size_t) obj_request->xferred;
4282 ceph_copy_from_page_vector(pages, buf, 0, size);
4283 rbd_assert(size <= (size_t)INT_MAX);
4287 rbd_obj_request_put(obj_request);
4289 ceph_release_page_vector(pages, page_count);
4295 * Read the complete header for the given rbd device. On successful
4296 * return, the rbd_dev->header field will contain up-to-date
4297 * information about the image.
4299 static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
4301 struct rbd_image_header_ondisk *ondisk = NULL;
4308 * The complete header will include an array of its 64-bit
4309 * snapshot ids, followed by the names of those snapshots as
4310 * a contiguous block of NUL-terminated strings. Note that
4311 * the number of snapshots could change by the time we read
4312 * it in, in which case we re-read it.
4319 size = sizeof (*ondisk);
4320 size += snap_count * sizeof (struct rbd_image_snap_ondisk);
4322 ondisk = kmalloc(size, GFP_KERNEL);
4326 ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_oid.name,
4330 if ((size_t)ret < size) {
4332 rbd_warn(rbd_dev, "short header read (want %zd got %d)",
4336 if (!rbd_dev_ondisk_valid(ondisk)) {
4338 rbd_warn(rbd_dev, "invalid header");
4342 names_size = le64_to_cpu(ondisk->snap_names_len);
4343 want_count = snap_count;
4344 snap_count = le32_to_cpu(ondisk->snap_count);
4345 } while (snap_count != want_count);
4347 ret = rbd_header_from_disk(rbd_dev, ondisk);
4355 * Clear the rbd device's EXISTS flag if the snapshot it's mapped to
4356 * has disappeared from the (just updated) snapshot context.
4358 static void rbd_exists_validate(struct rbd_device *rbd_dev)
4362 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags))
4365 snap_id = rbd_dev->spec->snap_id;
4366 if (snap_id == CEPH_NOSNAP)
4369 if (rbd_dev_snap_index(rbd_dev, snap_id) == BAD_SNAP_INDEX)
4370 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
4373 static void rbd_dev_update_size(struct rbd_device *rbd_dev)
4378 * If EXISTS is not set, rbd_dev->disk may be NULL, so don't
4379 * try to update its size. If REMOVING is set, updating size
4380 * is just useless work since the device can't be opened.
4382 if (test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags) &&
4383 !test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags)) {
4384 size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
4385 dout("setting size to %llu sectors", (unsigned long long)size);
4386 set_capacity(rbd_dev->disk, size);
4387 revalidate_disk(rbd_dev->disk);
4391 static int rbd_dev_refresh(struct rbd_device *rbd_dev)
4396 down_write(&rbd_dev->header_rwsem);
4397 mapping_size = rbd_dev->mapping.size;
4399 ret = rbd_dev_header_info(rbd_dev);
4404 * If there is a parent, see if it has disappeared due to the
4405 * mapped image getting flattened.
4407 if (rbd_dev->parent) {
4408 ret = rbd_dev_v2_parent_info(rbd_dev);
4413 if (rbd_dev->spec->snap_id == CEPH_NOSNAP) {
4414 rbd_dev->mapping.size = rbd_dev->header.image_size;
4416 /* validate mapped snapshot's EXISTS flag */
4417 rbd_exists_validate(rbd_dev);
4421 up_write(&rbd_dev->header_rwsem);
4422 if (!ret && mapping_size != rbd_dev->mapping.size)
4423 rbd_dev_update_size(rbd_dev);
4428 static int rbd_init_request(void *data, struct request *rq,
4429 unsigned int hctx_idx, unsigned int request_idx,
4430 unsigned int numa_node)
4432 struct work_struct *work = blk_mq_rq_to_pdu(rq);
4434 INIT_WORK(work, rbd_queue_workfn);
4438 static struct blk_mq_ops rbd_mq_ops = {
4439 .queue_rq = rbd_queue_rq,
4440 .map_queue = blk_mq_map_queue,
4441 .init_request = rbd_init_request,
4444 static int rbd_init_disk(struct rbd_device *rbd_dev)
4446 struct gendisk *disk;
4447 struct request_queue *q;
4451 /* create gendisk info */
4452 disk = alloc_disk(single_major ?
4453 (1 << RBD_SINGLE_MAJOR_PART_SHIFT) :
4454 RBD_MINORS_PER_MAJOR);
4458 snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
4460 disk->major = rbd_dev->major;
4461 disk->first_minor = rbd_dev->minor;
4463 disk->flags |= GENHD_FL_EXT_DEVT;
4464 disk->fops = &rbd_bd_ops;
4465 disk->private_data = rbd_dev;
4467 memset(&rbd_dev->tag_set, 0, sizeof(rbd_dev->tag_set));
4468 rbd_dev->tag_set.ops = &rbd_mq_ops;
4469 rbd_dev->tag_set.queue_depth = rbd_dev->opts->queue_depth;
4470 rbd_dev->tag_set.numa_node = NUMA_NO_NODE;
4471 rbd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
4472 rbd_dev->tag_set.nr_hw_queues = 1;
4473 rbd_dev->tag_set.cmd_size = sizeof(struct work_struct);
4475 err = blk_mq_alloc_tag_set(&rbd_dev->tag_set);
4479 q = blk_mq_init_queue(&rbd_dev->tag_set);
4485 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
4486 /* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */
4488 /* set io sizes to object size */
4489 segment_size = rbd_obj_bytes(&rbd_dev->header);
4490 blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
4491 q->limits.max_sectors = queue_max_hw_sectors(q);
4492 blk_queue_max_segments(q, segment_size / SECTOR_SIZE);
4493 blk_queue_max_segment_size(q, segment_size);
4494 blk_queue_io_min(q, segment_size);
4495 blk_queue_io_opt(q, segment_size);
4497 /* enable the discard support */
4498 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
4499 q->limits.discard_granularity = segment_size;
4500 q->limits.discard_alignment = segment_size;
4501 blk_queue_max_discard_sectors(q, segment_size / SECTOR_SIZE);
4502 q->limits.discard_zeroes_data = 1;
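	/*
	 * For example, assuming the default 4 MiB object size
	 * (obj_order 22; the actual order comes from the image
	 * header), discard granularity and alignment are 4 MiB and a
	 * single discard is capped at 8192 512-byte sectors, i.e. one
	 * object.
	 */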
4504 if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
4505 q->backing_dev_info.capabilities |= BDI_CAP_STABLE_WRITES;
4509 q->queuedata = rbd_dev;
4511 rbd_dev->disk = disk;
4515 blk_mq_free_tag_set(&rbd_dev->tag_set);
4525 static struct rbd_device *dev_to_rbd_dev(struct device *dev)
4527 return container_of(dev, struct rbd_device, dev);
4530 static ssize_t rbd_size_show(struct device *dev,
4531 struct device_attribute *attr, char *buf)
4533 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4535 return sprintf(buf, "%llu\n",
4536 (unsigned long long)rbd_dev->mapping.size);
4540 * Note this shows the features for whatever's mapped, which is not
4541 * necessarily the base image.
4543 static ssize_t rbd_features_show(struct device *dev,
4544 struct device_attribute *attr, char *buf)
4546 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4548 return sprintf(buf, "0x%016llx\n",
4549 (unsigned long long)rbd_dev->mapping.features);
4552 static ssize_t rbd_major_show(struct device *dev,
4553 struct device_attribute *attr, char *buf)
4555 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4558 return sprintf(buf, "%d\n", rbd_dev->major);
4560 return sprintf(buf, "(none)\n");
4563 static ssize_t rbd_minor_show(struct device *dev,
4564 struct device_attribute *attr, char *buf)
4566 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4568 return sprintf(buf, "%d\n", rbd_dev->minor);
4571 static ssize_t rbd_client_addr_show(struct device *dev,
4572 struct device_attribute *attr, char *buf)
4574 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4575 struct ceph_entity_addr *client_addr =
4576 ceph_client_addr(rbd_dev->rbd_client->client);
4578 return sprintf(buf, "%pISpc/%u\n", &client_addr->in_addr,
4579 le32_to_cpu(client_addr->nonce));
4582 static ssize_t rbd_client_id_show(struct device *dev,
4583 struct device_attribute *attr, char *buf)
4585 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4587 return sprintf(buf, "client%lld\n",
4588 ceph_client_gid(rbd_dev->rbd_client->client));
4591 static ssize_t rbd_cluster_fsid_show(struct device *dev,
4592 struct device_attribute *attr, char *buf)
4594 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4596 return sprintf(buf, "%pU\n", &rbd_dev->rbd_client->client->fsid);
4599 static ssize_t rbd_config_info_show(struct device *dev,
4600 struct device_attribute *attr, char *buf)
4602 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4604 return sprintf(buf, "%s\n", rbd_dev->config_info);
4607 static ssize_t rbd_pool_show(struct device *dev,
4608 struct device_attribute *attr, char *buf)
4610 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4612 return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
4615 static ssize_t rbd_pool_id_show(struct device *dev,
4616 struct device_attribute *attr, char *buf)
4618 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4620 return sprintf(buf, "%llu\n",
4621 (unsigned long long) rbd_dev->spec->pool_id);
4624 static ssize_t rbd_name_show(struct device *dev,
4625 struct device_attribute *attr, char *buf)
4627 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4629 if (rbd_dev->spec->image_name)
4630 return sprintf(buf, "%s\n", rbd_dev->spec->image_name);
4632 return sprintf(buf, "(unknown)\n");
4635 static ssize_t rbd_image_id_show(struct device *dev,
4636 struct device_attribute *attr, char *buf)
4638 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4640 return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
4644 * Shows the name of the currently-mapped snapshot (or
4645 * RBD_SNAP_HEAD_NAME for the base image).
4647 static ssize_t rbd_snap_show(struct device *dev,
4648 struct device_attribute *attr,
4651 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4653 return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
4656 static ssize_t rbd_snap_id_show(struct device *dev,
4657 struct device_attribute *attr, char *buf)
4659 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4661 return sprintf(buf, "%llu\n", rbd_dev->spec->snap_id);
4665 * For a v2 image, shows the chain of parent images, separated by empty
4666 * lines. For v1 images or if there is no parent, shows "(no parent image)".
4669 static ssize_t rbd_parent_show(struct device *dev,
4670 struct device_attribute *attr,
4673 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4676 if (!rbd_dev->parent)
4677 return sprintf(buf, "(no parent image)\n");
4679 for ( ; rbd_dev->parent; rbd_dev = rbd_dev->parent) {
4680 struct rbd_spec *spec = rbd_dev->parent_spec;
4682 count += sprintf(&buf[count], "%s"
4683 "pool_id %llu\npool_name %s\n"
4684 "image_id %s\nimage_name %s\n"
4685 "snap_id %llu\nsnap_name %s\n"
4687 !count ? "" : "\n", /* first? */
4688 spec->pool_id, spec->pool_name,
4689 spec->image_id, spec->image_name ?: "(unknown)",
4690 spec->snap_id, spec->snap_name,
4691 rbd_dev->parent_overlap);
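/*
 * Illustration (made-up values): for a clone with one ancestor,
 * reading the "parent" sysfs attribute yields one block per level
 * of the chain, separated by empty lines, e.g.:
 *
 *	pool_id 2
 *	pool_name rbd
 *	image_id 1018e2c5d2cb
 *	image_name base-img
 *	snap_id 4
 *	snap_name base-snap
 *	overlap 10737418240
 */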
4697 static ssize_t rbd_image_refresh(struct device *dev,
4698 struct device_attribute *attr,
4702 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4705 ret = rbd_dev_refresh(rbd_dev);
4712 static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
4713 static DEVICE_ATTR(features, S_IRUGO, rbd_features_show, NULL);
4714 static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
4715 static DEVICE_ATTR(minor, S_IRUGO, rbd_minor_show, NULL);
4716 static DEVICE_ATTR(client_addr, S_IRUGO, rbd_client_addr_show, NULL);
4717 static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
4718 static DEVICE_ATTR(cluster_fsid, S_IRUGO, rbd_cluster_fsid_show, NULL);
4719 static DEVICE_ATTR(config_info, S_IRUSR, rbd_config_info_show, NULL);
4720 static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
4721 static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL);
4722 static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
4723 static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL);
4724 static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
4725 static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
4726 static DEVICE_ATTR(snap_id, S_IRUGO, rbd_snap_id_show, NULL);
4727 static DEVICE_ATTR(parent, S_IRUGO, rbd_parent_show, NULL);
4729 static struct attribute *rbd_attrs[] = {
4730 &dev_attr_size.attr,
4731 &dev_attr_features.attr,
4732 &dev_attr_major.attr,
4733 &dev_attr_minor.attr,
4734 &dev_attr_client_addr.attr,
4735 &dev_attr_client_id.attr,
4736 &dev_attr_cluster_fsid.attr,
4737 &dev_attr_config_info.attr,
4738 &dev_attr_pool.attr,
4739 &dev_attr_pool_id.attr,
4740 &dev_attr_name.attr,
4741 &dev_attr_image_id.attr,
4742 &dev_attr_current_snap.attr,
4743 &dev_attr_snap_id.attr,
4744 &dev_attr_parent.attr,
4745 &dev_attr_refresh.attr,
4749 static struct attribute_group rbd_attr_group = {
4753 static const struct attribute_group *rbd_attr_groups[] = {
4758 static void rbd_dev_release(struct device *dev);
4760 static struct device_type rbd_device_type = {
4762 .groups = rbd_attr_groups,
4763 .release = rbd_dev_release,
4766 static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
4768 kref_get(&spec->kref);
4773 static void rbd_spec_free(struct kref *kref);
4774 static void rbd_spec_put(struct rbd_spec *spec)
4777 kref_put(&spec->kref, rbd_spec_free);
4780 static struct rbd_spec *rbd_spec_alloc(void)
4782 struct rbd_spec *spec;
4784 spec = kzalloc(sizeof (*spec), GFP_KERNEL);
4788 spec->pool_id = CEPH_NOPOOL;
4789 spec->snap_id = CEPH_NOSNAP;
4790 kref_init(&spec->kref);
4795 static void rbd_spec_free(struct kref *kref)
4797 struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);
4799 kfree(spec->pool_name);
4800 kfree(spec->image_id);
4801 kfree(spec->image_name);
4802 kfree(spec->snap_name);
4806 static void rbd_dev_free(struct rbd_device *rbd_dev)
4808 WARN_ON(rbd_dev->watch_state != RBD_WATCH_STATE_UNREGISTERED);
4809 WARN_ON(rbd_dev->lock_state != RBD_LOCK_STATE_UNLOCKED);
4811 ceph_oid_destroy(&rbd_dev->header_oid);
4812 ceph_oloc_destroy(&rbd_dev->header_oloc);
4813 kfree(rbd_dev->config_info);
4815 rbd_put_client(rbd_dev->rbd_client);
4816 rbd_spec_put(rbd_dev->spec);
4817 kfree(rbd_dev->opts);
4821 static void rbd_dev_release(struct device *dev)
4823 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4824 bool need_put = !!rbd_dev->opts;
4827 destroy_workqueue(rbd_dev->task_wq);
4828 ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
4831 rbd_dev_free(rbd_dev);
4834 * This is racy, but way better than doing the module put outside of
4835 * the release callback. The race window is pretty small, so
4836 * doing something similar to dm (dm-builtin.c) is overkill.
4839 module_put(THIS_MODULE);
4842 static struct rbd_device *__rbd_dev_create(struct rbd_client *rbdc,
4843 struct rbd_spec *spec)
4845 struct rbd_device *rbd_dev;
4847 rbd_dev = kzalloc(sizeof(*rbd_dev), GFP_KERNEL);
4851 spin_lock_init(&rbd_dev->lock);
4852 INIT_LIST_HEAD(&rbd_dev->node);
4853 init_rwsem(&rbd_dev->header_rwsem);
4855 ceph_oid_init(&rbd_dev->header_oid);
4856 ceph_oloc_init(&rbd_dev->header_oloc);
4858 mutex_init(&rbd_dev->watch_mutex);
4859 rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED;
4860 INIT_DELAYED_WORK(&rbd_dev->watch_dwork, rbd_reregister_watch);
4862 init_rwsem(&rbd_dev->lock_rwsem);
4863 rbd_dev->lock_state = RBD_LOCK_STATE_UNLOCKED;
4864 INIT_WORK(&rbd_dev->acquired_lock_work, rbd_notify_acquired_lock);
4865 INIT_WORK(&rbd_dev->released_lock_work, rbd_notify_released_lock);
4866 INIT_DELAYED_WORK(&rbd_dev->lock_dwork, rbd_acquire_lock);
4867 INIT_WORK(&rbd_dev->unlock_work, rbd_release_lock_work);
4868 init_waitqueue_head(&rbd_dev->lock_waitq);
4870 rbd_dev->dev.bus = &rbd_bus_type;
4871 rbd_dev->dev.type = &rbd_device_type;
4872 rbd_dev->dev.parent = &rbd_root_dev;
4873 device_initialize(&rbd_dev->dev);
4875 rbd_dev->rbd_client = rbdc;
4876 rbd_dev->spec = spec;
4878 rbd_dev->layout.stripe_unit = 1 << RBD_MAX_OBJ_ORDER;
4879 rbd_dev->layout.stripe_count = 1;
4880 rbd_dev->layout.object_size = 1 << RBD_MAX_OBJ_ORDER;
4881 rbd_dev->layout.pool_id = spec->pool_id;
4882 RCU_INIT_POINTER(rbd_dev->layout.pool_ns, NULL);
4888 * Create an rbd_dev representing a mapping.
4890 static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
4891 struct rbd_spec *spec,
4892 struct rbd_options *opts)
4894 struct rbd_device *rbd_dev;
4896 rbd_dev = __rbd_dev_create(rbdc, spec);
4900 rbd_dev->opts = opts;
4902 /* get an id and fill in device name */
4903 rbd_dev->dev_id = ida_simple_get(&rbd_dev_id_ida, 0,
4904 minor_to_rbd_dev_id(1 << MINORBITS),
4906 if (rbd_dev->dev_id < 0)
4909 sprintf(rbd_dev->name, RBD_DRV_NAME "%d", rbd_dev->dev_id);
4910 rbd_dev->task_wq = alloc_ordered_workqueue("%s-tasks", WQ_MEM_RECLAIM,
4912 if (!rbd_dev->task_wq)
4915 /* we have a ref from do_rbd_add() */
4916 __module_get(THIS_MODULE);
4918 dout("%s rbd_dev %p dev_id %d\n", __func__, rbd_dev, rbd_dev->dev_id);
4922 ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
4924 rbd_dev_free(rbd_dev);
4928 static void rbd_dev_destroy(struct rbd_device *rbd_dev)
4931 put_device(&rbd_dev->dev);
4935 * Get the size and object order for an image snapshot, or if
4936 * snap_id is CEPH_NOSNAP, gets this information for the base image.
4939 static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
4940 u8 *order, u64 *snap_size)
4942 __le64 snapid = cpu_to_le64(snap_id);
4947 } __attribute__ ((packed)) size_buf = { 0 };
4949 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
4951 &snapid, sizeof (snapid),
4952 &size_buf, sizeof (size_buf));
4953 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4956 if (ret < sizeof (size_buf))
4960 *order = size_buf.order;
4961 dout(" order %u", (unsigned int)*order);
4963 *snap_size = le64_to_cpu(size_buf.size);
4965 dout(" snap_id 0x%016llx snap_size = %llu\n",
4966 (unsigned long long)snap_id,
4967 (unsigned long long)*snap_size);
4972 static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
4974 return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
4975 &rbd_dev->header.obj_order,
4976 &rbd_dev->header.image_size);
4979 static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
4985 reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL);
4989 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
4990 "rbd", "get_object_prefix", NULL, 0,
4991 reply_buf, RBD_OBJ_PREFIX_LEN_MAX);
4992 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4997 rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
4998 p + ret, NULL, GFP_NOIO);
5001 if (IS_ERR(rbd_dev->header.object_prefix)) {
5002 ret = PTR_ERR(rbd_dev->header.object_prefix);
5003 rbd_dev->header.object_prefix = NULL;
5005 dout(" object_prefix = %s\n", rbd_dev->header.object_prefix);
5013 static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
5016 __le64 snapid = cpu_to_le64(snap_id);
5020 } __attribute__ ((packed)) features_buf = { 0 };
5024 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
5025 "rbd", "get_features",
5026 &snapid, sizeof (snapid),
5027 &features_buf, sizeof (features_buf));
5028 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5031 if (ret < sizeof (features_buf))
5034 unsup = le64_to_cpu(features_buf.incompat) & ~RBD_FEATURES_SUPPORTED;
5036 rbd_warn(rbd_dev, "image uses unsupported features: 0x%llx",
5041 *snap_features = le64_to_cpu(features_buf.features);
5043 dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
5044 (unsigned long long)snap_id,
5045 (unsigned long long)*snap_features,
5046 (unsigned long long)le64_to_cpu(features_buf.incompat));
5051 static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
5053 return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
5054 &rbd_dev->header.features);
5057 static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
5059 struct rbd_spec *parent_spec;
5061 void *reply_buf = NULL;
5071 parent_spec = rbd_spec_alloc();
5075 size = sizeof (__le64) + /* pool_id */
5076 sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX + /* image_id */
5077 sizeof (__le64) + /* snap_id */
5078 sizeof (__le64); /* overlap */
5079 reply_buf = kmalloc(size, GFP_KERNEL);
5085 snapid = cpu_to_le64(rbd_dev->spec->snap_id);
5086 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
5087 "rbd", "get_parent",
5088 &snapid, sizeof (snapid),
5090 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5095 end = reply_buf + ret;
5097 ceph_decode_64_safe(&p, end, pool_id, out_err);
5098 if (pool_id == CEPH_NOPOOL) {
5100 * Either the parent never existed, or we have a
5101 * record of it but the image got flattened so it no
5102 * longer has a parent. When the parent of a
5103 * layered image disappears we immediately set the
5104 * overlap to 0. The effect of this is that all new
5105 * requests will be treated as if the image had no parent.
5108 if (rbd_dev->parent_overlap) {
5109 rbd_dev->parent_overlap = 0;
5110 rbd_dev_parent_put(rbd_dev);
5111 pr_info("%s: clone image has been flattened\n",
5112 rbd_dev->disk->disk_name);
5115 goto out; /* No parent? No problem. */
5118 /* The ceph file layout needs to fit pool id in 32 bits */
5121 if (pool_id > (u64)U32_MAX) {
5122 rbd_warn(NULL, "parent pool id too large (%llu > %u)",
5123 (unsigned long long)pool_id, U32_MAX);
5127 image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
5128 if (IS_ERR(image_id)) {
5129 ret = PTR_ERR(image_id);
5132 ceph_decode_64_safe(&p, end, snap_id, out_err);
5133 ceph_decode_64_safe(&p, end, overlap, out_err);
5136 * The parent won't change (except when the clone is
5137 * flattened, which is already handled above). So we only
5138 * need to record the parent spec if we have not already done so.
5140 if (!rbd_dev->parent_spec) {
5141 parent_spec->pool_id = pool_id;
5142 parent_spec->image_id = image_id;
5143 parent_spec->snap_id = snap_id;
5144 rbd_dev->parent_spec = parent_spec;
5145 parent_spec = NULL; /* rbd_dev now owns this */
5151 * We always update the parent overlap. If it's zero we issue
5152 * a warning, as we will proceed as if there was no parent.
5156 /* refresh, careful to warn just once */
5157 if (rbd_dev->parent_overlap)
5159 "clone now standalone (overlap became 0)");
5162 rbd_warn(rbd_dev, "clone is standalone (overlap 0)");
5165 rbd_dev->parent_overlap = overlap;
5171 rbd_spec_put(parent_spec);
5176 static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
5180 __le64 stripe_count;
5181 } __attribute__ ((packed)) striping_info_buf = { 0 };
5182 size_t size = sizeof (striping_info_buf);
5189 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
5190 "rbd", "get_stripe_unit_count", NULL, 0,
5191 (char *)&striping_info_buf, size);
5192 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5199 * We don't actually support the "fancy striping" feature
5200 * (STRIPINGV2) yet, but if the striping sizes are the
5201 * defaults the behavior is the same as before. So find
5202 * out, and only fail if the image has non-default values.
5205 obj_size = (u64)1 << rbd_dev->header.obj_order;
5206 p = &striping_info_buf;
5207 stripe_unit = ceph_decode_64(&p);
5208 if (stripe_unit != obj_size) {
5209 rbd_warn(rbd_dev, "unsupported stripe unit "
5210 "(got %llu want %llu)",
5211 stripe_unit, obj_size);
5214 stripe_count = ceph_decode_64(&p);
5215 if (stripe_count != 1) {
5216 rbd_warn(rbd_dev, "unsupported stripe count "
5217 "(got %llu want 1)", stripe_count);
5220 rbd_dev->header.stripe_unit = stripe_unit;
5221 rbd_dev->header.stripe_count = stripe_count;
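	/*
	 * Example (illustrative): with the common default object order
	 * of 22 (4 MiB objects), the only accepted reply here is
	 * stripe_unit == 4194304 and stripe_count == 1, i.e. the
	 * layout that plain (non-fancy) striping already implies.
	 */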
5226 static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
5228 size_t image_id_size;
5233 void *reply_buf = NULL;
5235 char *image_name = NULL;
5238 rbd_assert(!rbd_dev->spec->image_name);
5240 len = strlen(rbd_dev->spec->image_id);
5241 image_id_size = sizeof (__le32) + len;
5242 image_id = kmalloc(image_id_size, GFP_KERNEL);
5247 end = image_id + image_id_size;
5248 ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);
5250 size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
5251 reply_buf = kmalloc(size, GFP_KERNEL);
5255 ret = rbd_obj_method_sync(rbd_dev, RBD_DIRECTORY,
5256 "rbd", "dir_get_name",
5257 image_id, image_id_size,
5262 end = reply_buf + ret;
5264 image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
5265 if (IS_ERR(image_name))
5268 dout("%s: name is %s len is %zd\n", __func__, image_name, len);
5276 static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
5278 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
5279 const char *snap_name;
5282 /* Skip over names until we find the one we are looking for */
5284 snap_name = rbd_dev->header.snap_names;
5285 while (which < snapc->num_snaps) {
5286 if (!strcmp(name, snap_name))
5287 return snapc->snaps[which];
5288 snap_name += strlen(snap_name) + 1;
5294 static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
5296 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
5301 for (which = 0; !found && which < snapc->num_snaps; which++) {
5302 const char *snap_name;
5304 snap_id = snapc->snaps[which];
5305 snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
5306 if (IS_ERR(snap_name)) {
5307 /* ignore no-longer existing snapshots */
5308 if (PTR_ERR(snap_name) == -ENOENT)
5313 found = !strcmp(name, snap_name);
5316 return found ? snap_id : CEPH_NOSNAP;
5320 * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if
5321 * no snapshot by that name is found, or if an error occurs.
5323 static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
5325 if (rbd_dev->image_format == 1)
5326 return rbd_v1_snap_id_by_name(rbd_dev, name);
5328 return rbd_v2_snap_id_by_name(rbd_dev, name);
5332 * An image being mapped will have everything but the snap id.
5334 static int rbd_spec_fill_snap_id(struct rbd_device *rbd_dev)
5336 struct rbd_spec *spec = rbd_dev->spec;
5338 rbd_assert(spec->pool_id != CEPH_NOPOOL && spec->pool_name);
5339 rbd_assert(spec->image_id && spec->image_name);
5340 rbd_assert(spec->snap_name);
5342 if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
5345 snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
5346 if (snap_id == CEPH_NOSNAP)
5349 spec->snap_id = snap_id;
5351 spec->snap_id = CEPH_NOSNAP;
5358 * A parent image will have all ids but none of the names.
5360 * All names in an rbd spec are dynamically allocated. It's OK if we
5361 * can't figure out the name for an image id.
5363 static int rbd_spec_fill_names(struct rbd_device *rbd_dev)
5365 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
5366 struct rbd_spec *spec = rbd_dev->spec;
5367 const char *pool_name;
5368 const char *image_name;
5369 const char *snap_name;
5372 rbd_assert(spec->pool_id != CEPH_NOPOOL);
5373 rbd_assert(spec->image_id);
5374 rbd_assert(spec->snap_id != CEPH_NOSNAP);
5376 /* Get the pool name; we have to make our own copy of this */
5378 pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
5380 rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
5383 pool_name = kstrdup(pool_name, GFP_KERNEL);
5387 /* Fetch the image name; tolerate failure here */
5389 image_name = rbd_dev_image_name(rbd_dev);
5391 rbd_warn(rbd_dev, "unable to get image name");
5393 /* Fetch the snapshot name */
5395 snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
5396 if (IS_ERR(snap_name)) {
5397 ret = PTR_ERR(snap_name);
5401 spec->pool_name = pool_name;
5402 spec->image_name = image_name;
5403 spec->snap_name = snap_name;
5413 static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
5422 struct ceph_snap_context *snapc;
5426 * We'll need room for the seq value (maximum snapshot id),
5427 * snapshot count, and array of that many snapshot ids.
5428 * For now we have a fixed upper limit on the number we're
5429 * prepared to receive.
5431 size = sizeof (__le64) + sizeof (__le32) +
5432 RBD_MAX_SNAP_COUNT * sizeof (__le64);
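	/*
	 * Worked out (sketch): with RBD_MAX_SNAP_COUNT == 510 this is
	 * 8 + 4 + 510 * 8 = 4092 bytes, so the largest snap context we
	 * are prepared to accept still fits in a single 4KB buffer.
	 */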
5433 reply_buf = kzalloc(size, GFP_KERNEL);
5437 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
5438 "rbd", "get_snapcontext", NULL, 0,
5440 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5445 end = reply_buf + ret;
5447 ceph_decode_64_safe(&p, end, seq, out);
5448 ceph_decode_32_safe(&p, end, snap_count, out);
5451 * Make sure the reported number of snapshot ids wouldn't go
5452 * beyond the end of our buffer. But before checking that,
5453 * make sure the computed size of the snapshot context we
5454 * allocate is representable in a size_t.
5456 if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
5461 if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
5465 snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
5471 for (i = 0; i < snap_count; i++)
5472 snapc->snaps[i] = ceph_decode_64(&p);
5474 ceph_put_snap_context(rbd_dev->header.snapc);
5475 rbd_dev->header.snapc = snapc;
5477 dout(" snap context seq = %llu, snap_count = %u\n",
5478 (unsigned long long)seq, (unsigned int)snap_count);
5485 static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
5496 size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
5497 reply_buf = kmalloc(size, GFP_KERNEL);
5499 return ERR_PTR(-ENOMEM);
5501 snapid = cpu_to_le64(snap_id);
5502 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
5503 "rbd", "get_snapshot_name",
5504 &snapid, sizeof (snapid),
5506 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5508 snap_name = ERR_PTR(ret);
5513 end = reply_buf + ret;
5514 snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
5515 if (IS_ERR(snap_name))
5518 dout(" snap_id 0x%016llx snap_name = %s\n",
5519 (unsigned long long)snap_id, snap_name);
5526 static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
5528 bool first_time = rbd_dev->header.object_prefix == NULL;
5531 ret = rbd_dev_v2_image_size(rbd_dev);
5536 ret = rbd_dev_v2_header_onetime(rbd_dev);
5541 ret = rbd_dev_v2_snap_context(rbd_dev);
5542 if (ret && first_time) {
5543 kfree(rbd_dev->header.object_prefix);
5544 rbd_dev->header.object_prefix = NULL;
5550 static int rbd_dev_header_info(struct rbd_device *rbd_dev)
5552 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
5554 if (rbd_dev->image_format == 1)
5555 return rbd_dev_v1_header_info(rbd_dev);
5557 return rbd_dev_v2_header_info(rbd_dev);
5561 * Skips over white space at *buf, and updates *buf to point to the
5562 * first found non-space character (if any). Returns the length of
5563 * the token (string of non-white space characters) found. Note
5564 * that *buf must be terminated with '\0'.
5566 static inline size_t next_token(const char **buf)
5569 * These are the characters that produce nonzero for
5570 * isspace() in the "C" and "POSIX" locales.
5572 const char *spaces = " \f\n\r\t\v";
5574 *buf += strspn(*buf, spaces); /* Find start of token */
5576 return strcspn(*buf, spaces); /* Return token length */
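/*
 * Usage sketch (illustrative, not called anywhere):
 *
 *	const char *buf = "  rbd myimage";
 *	size_t len = next_token(&buf);
 *
 * leaves buf pointing at "rbd myimage" and returns len == 3, the
 * length of the token "rbd".
 */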
5580 * Finds the next token in *buf, dynamically allocates a buffer big
5581 * enough to hold a copy of it, and copies the token into the new
5582 * buffer. The copy is guaranteed to be terminated with '\0'. Note
5583 * that a duplicate buffer is created even for a zero-length token.
5585 * Returns a pointer to the newly-allocated duplicate, or a null
5586 * pointer if memory for the duplicate was not available. If
5587 * the lenp argument is a non-null pointer, the length of the token
5588 * (not including the '\0') is returned in *lenp.
5590 * If successful, the *buf pointer will be updated to point beyond
5591 * the end of the found token.
5593 * Note: uses GFP_KERNEL for allocation.
5595 static inline char *dup_token(const char **buf, size_t *lenp)
5600 len = next_token(buf);
5601 dup = kmemdup(*buf, len + 1, GFP_KERNEL);
5604 *(dup + len) = '\0';
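/*
 * Usage sketch (illustrative): with buf pointing at "rbd myimage",
 *
 *	char *dup = dup_token(&buf, &len);
 *
 * returns a NUL-terminated copy "rbd" (len == 3) and advances buf
 * past the token; the caller is responsible for kfree()ing the copy.
 */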
5614 * Parse the options provided for an "rbd add" (i.e., rbd image
5615 * mapping) request. These arrive via a write to /sys/bus/rbd/add,
5616 * and the data written is passed here via a NUL-terminated buffer.
5617 * Returns 0 if successful or an error code otherwise.
5619 * The information extracted from these options is recorded in
5620 * the other parameters, which return dynamically-allocated structures:
5623 * The address of a pointer that will refer to a ceph options
5624 * structure. Caller must release the returned pointer using
5625 * ceph_destroy_options() when it is no longer needed.
5627 * Address of an rbd options pointer. Fully initialized by
5628 * this function; caller must release with kfree().
5630 * Address of an rbd image specification pointer. Fully
5631 * initialized by this function based on parsed options.
5632 * Caller must release with rbd_spec_put().
5634 * The options passed take this form:
5635 * <mon_addrs> <options> <pool_name> <image_name> [<snap_name>]
5638 * A comma-separated list of one or more monitor addresses.
5639 * A monitor address is an ip address, optionally followed
5640 * by a port number (separated by a colon).
5641 * I.e.: ip1[:port1][,ip2[:port2]...]
5643 * A comma-separated list of ceph and/or rbd options.
5645 * The name of the rados pool containing the rbd image.
5647 * The name of the image in that pool to map.
5649 * An optional snapshot name. If provided, the mapping will
5650 * present data from the image at the time that snapshot was
5651 * created. The image head is used if no snapshot name is
5652 * provided. Snapshot mappings are always read-only.
5654 static int rbd_add_parse_args(const char *buf,
5655 struct ceph_options **ceph_opts,
5656 struct rbd_options **opts,
5657 struct rbd_spec **rbd_spec)
5661 const char *mon_addrs;
5663 size_t mon_addrs_size;
5664 struct rbd_spec *spec = NULL;
5665 struct rbd_options *rbd_opts = NULL;
5666 struct ceph_options *copts;
5669 /* The first four tokens are required */
5671 len = next_token(&buf);
5673 rbd_warn(NULL, "no monitor address(es) provided");
5677 mon_addrs_size = len + 1;
5681 options = dup_token(&buf, NULL);
5685 rbd_warn(NULL, "no options provided");
5689 spec = rbd_spec_alloc();
5693 spec->pool_name = dup_token(&buf, NULL);
5694 if (!spec->pool_name)
5696 if (!*spec->pool_name) {
5697 rbd_warn(NULL, "no pool name provided");
5701 spec->image_name = dup_token(&buf, NULL);
5702 if (!spec->image_name)
5704 if (!*spec->image_name) {
5705 rbd_warn(NULL, "no image name provided");
5710 * Snapshot name is optional; default is to use "-"
5711 * (indicating the head/no snapshot).
5713 len = next_token(&buf);
5715 buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
5716 len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
5717 } else if (len > RBD_MAX_SNAP_NAME_LEN) {
5718 ret = -ENAMETOOLONG;
5721 snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
5724 *(snap_name + len) = '\0';
5725 spec->snap_name = snap_name;
5727 /* Initialize all rbd options to the defaults */
5729 rbd_opts = kzalloc(sizeof (*rbd_opts), GFP_KERNEL);
5733 rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;
5734 rbd_opts->queue_depth = RBD_QUEUE_DEPTH_DEFAULT;
5735 rbd_opts->lock_on_read = RBD_LOCK_ON_READ_DEFAULT;
5737 copts = ceph_parse_options(options, mon_addrs,
5738 mon_addrs + mon_addrs_size - 1,
5739 parse_rbd_opts_token, rbd_opts);
5740 if (IS_ERR(copts)) {
5741 ret = PTR_ERR(copts);
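/*
 * Example add string (illustrative values), as written to
 * /sys/bus/rbd/add:
 *
 *	1.2.3.4:6789 name=admin,secret=AQAx... rbd myimage mysnap
 *
 * i.e. <mon_addrs> <options> <pool_name> <image_name> [<snap_name>];
 * omitting the snapshot name maps the image head.
 */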
5762 * Return pool id (>= 0) or a negative error code.
5764 static int rbd_add_get_pool_id(struct rbd_client *rbdc, const char *pool_name)
5766 struct ceph_options *opts = rbdc->client->options;
5772 ret = ceph_pg_poolid_by_name(rbdc->client->osdc.osdmap, pool_name);
5773 if (ret == -ENOENT && tries++ < 1) {
5774 ret = ceph_monc_get_version(&rbdc->client->monc, "osdmap",
5779 if (rbdc->client->osdc.osdmap->epoch < newest_epoch) {
5780 ceph_osdc_maybe_request_map(&rbdc->client->osdc);
5781 (void) ceph_monc_wait_osdmap(&rbdc->client->monc,
5783 opts->mount_timeout);
5786 /* the osdmap we have is new enough */
5795 * An rbd format 2 image has a unique identifier, distinct from the
5796 * name given to it by the user. Internally, that identifier is
5797 * what's used to specify the names of objects related to the image.
5799 * A special "rbd id" object is used to map an rbd image name to its
5800 * id. If that object doesn't exist, then there is no v2 rbd image
5801 * with the supplied name.
5803 * This function will record the given rbd_dev's image_id field if
5804 * it can be determined, and in that case will return 0. If any
5805 * errors occur a negative errno will be returned and the rbd_dev's
5806 * image_id field will be unchanged (and should be NULL).
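/*
 * Sketch (assuming the usual RBD_ID_PREFIX of "rbd_id." from
 * rbd_types.h): for an image named "myimage" the object probed below
 * is "rbd_id.myimage", whose "get_id" class method returns the
 * image's unique id string.
 */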
5808 static int rbd_dev_image_id(struct rbd_device *rbd_dev)
5817 * When probing a parent image, the image id is already
5818 * known (and the image name likely is not). There's no
5819 * need to fetch the image id again in this case. We
5820 * do still need to set the image format though.
5822 if (rbd_dev->spec->image_id) {
5823 rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;
5829 * First, see if the format 2 image id file exists, and if
5830 * so, get the image's persistent id from it.
5832 size = sizeof (RBD_ID_PREFIX) + strlen(rbd_dev->spec->image_name);
5833 object_name = kmalloc(size, GFP_NOIO);
5836 sprintf(object_name, "%s%s", RBD_ID_PREFIX, rbd_dev->spec->image_name);
5837 dout("rbd id object name is %s\n", object_name);
5839 /* Response will be an encoded string, which includes a length */
5841 size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
5842 response = kzalloc(size, GFP_NOIO);
5848 /* If it doesn't exist we'll assume it's a format 1 image */
5850 ret = rbd_obj_method_sync(rbd_dev, object_name,
5851 "rbd", "get_id", NULL, 0,
5852 response, RBD_IMAGE_ID_LEN_MAX);
5853 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5854 if (ret == -ENOENT) {
5855 image_id = kstrdup("", GFP_KERNEL);
5856 ret = image_id ? 0 : -ENOMEM;
5858 rbd_dev->image_format = 1;
5859 } else if (ret >= 0) {
5862 image_id = ceph_extract_encoded_string(&p, p + ret,
5864 ret = PTR_ERR_OR_ZERO(image_id);
5866 rbd_dev->image_format = 2;
5870 rbd_dev->spec->image_id = image_id;
5871 dout("image_id is %s\n", image_id);
5881 * Undo whatever state changes are made by the v1 or v2 header info calls.
5884 static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
5886 struct rbd_image_header *header;
5888 rbd_dev_parent_put(rbd_dev);
5890 /* Free dynamic fields from the header, then zero it out */
5892 header = &rbd_dev->header;
5893 ceph_put_snap_context(header->snapc);
5894 kfree(header->snap_sizes);
5895 kfree(header->snap_names);
5896 kfree(header->object_prefix);
5897 memset(header, 0, sizeof (*header));
5900 static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
5904 ret = rbd_dev_v2_object_prefix(rbd_dev);
5909 * Get and check the features for the image. Currently the
5910 * features are assumed to never change.
5912 ret = rbd_dev_v2_features(rbd_dev);
5916 /* If the image supports fancy striping, get its parameters */
5918 if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
5919 ret = rbd_dev_v2_striping_info(rbd_dev);
5923 /* No support for format 2 images that use crypto or compression */
5927 rbd_dev->header.features = 0;
5928 kfree(rbd_dev->header.object_prefix);
5929 rbd_dev->header.object_prefix = NULL;
5935 * @depth is rbd_dev_image_probe() -> rbd_dev_probe_parent() ->
5936 * rbd_dev_image_probe() recursion depth, which means it's also the
5937 * length of the already discovered part of the parent chain.
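/*
 * Illustration: mapping a clone whose ancestry is
 * img -> parent -> grandparent recurses with depth 0, 1, 2; chains
 * longer than RBD_MAX_PARENT_CHAIN_LEN ancestors are rejected below.
 */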
5939 static int rbd_dev_probe_parent(struct rbd_device *rbd_dev, int depth)
5941 struct rbd_device *parent = NULL;
5944 if (!rbd_dev->parent_spec)
5947 if (++depth > RBD_MAX_PARENT_CHAIN_LEN) {
5948 pr_info("parent chain is too long (%d)\n", depth);
5953 parent = __rbd_dev_create(rbd_dev->rbd_client, rbd_dev->parent_spec);
5960 * Images related by parent/child relationships always share
5961 * rbd_client and spec/parent_spec, so bump their refcounts.
5963 __rbd_get_client(rbd_dev->rbd_client);
5964 rbd_spec_get(rbd_dev->parent_spec);
5966 ret = rbd_dev_image_probe(parent, depth);
5970 rbd_dev->parent = parent;
5971 atomic_set(&rbd_dev->parent_ref, 1);
5975 rbd_dev_unparent(rbd_dev);
5976 rbd_dev_destroy(parent);
5981 * rbd_dev->header_rwsem must be locked for write and will be unlocked upon return.
5984 static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
5988 /* Record our major and minor device numbers. */
5990 if (!single_major) {
5991 ret = register_blkdev(0, rbd_dev->name);
5993 goto err_out_unlock;
5995 rbd_dev->major = ret;
5998 rbd_dev->major = rbd_major;
5999 rbd_dev->minor = rbd_dev_id_to_minor(rbd_dev->dev_id);
6002 /* Set up the blkdev mapping. */
6004 ret = rbd_init_disk(rbd_dev);
6006 goto err_out_blkdev;
6008 ret = rbd_dev_mapping_set(rbd_dev);
6012 set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
6013 set_disk_ro(rbd_dev->disk, rbd_dev->mapping.read_only);
6015 dev_set_name(&rbd_dev->dev, "%d", rbd_dev->dev_id);
6016 ret = device_add(&rbd_dev->dev);
6018 goto err_out_mapping;
6020 /* Everything's ready. Announce the disk to the world. */
6022 set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
6023 up_write(&rbd_dev->header_rwsem);
6025 spin_lock(&rbd_dev_list_lock);
6026 list_add_tail(&rbd_dev->node, &rbd_dev_list);
6027 spin_unlock(&rbd_dev_list_lock);
6029 add_disk(rbd_dev->disk);
6030 pr_info("%s: capacity %llu features 0x%llx\n", rbd_dev->disk->disk_name,
6031 (unsigned long long)get_capacity(rbd_dev->disk) << SECTOR_SHIFT,
6032 rbd_dev->header.features);
6037 rbd_dev_mapping_clear(rbd_dev);
6039 rbd_free_disk(rbd_dev);
6042 unregister_blkdev(rbd_dev->major, rbd_dev->name);
6044 up_write(&rbd_dev->header_rwsem);
6048 static int rbd_dev_header_name(struct rbd_device *rbd_dev)
6050 struct rbd_spec *spec = rbd_dev->spec;
6053 /* Record the header object name for this rbd image. */
6055 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
6057 rbd_dev->header_oloc.pool = rbd_dev->layout.pool_id;
6058 if (rbd_dev->image_format == 1)
6059 ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
6060 spec->image_name, RBD_SUFFIX);
6062 ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
6063 RBD_HEADER_PREFIX, spec->image_id);
6068 static void rbd_dev_image_release(struct rbd_device *rbd_dev)
6070 rbd_dev_unprobe(rbd_dev);
6071 rbd_dev->image_format = 0;
6072 kfree(rbd_dev->spec->image_id);
6073 rbd_dev->spec->image_id = NULL;
6075 rbd_dev_destroy(rbd_dev);
6079 * Probe for the existence of the header object for the given rbd
6080 * device. If this image is the one being mapped (i.e., not a
6081 * parent), initiate a watch on its header object before using that
6082 * object to get detailed information about the rbd image.
6084 static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
6089 * Get the id from the image id object. Unless there's an
6090 * error, rbd_dev->spec->image_id will be filled in with
6091 * a dynamically-allocated string, and rbd_dev->image_format
6092 * will be set to either 1 or 2.
6094 ret = rbd_dev_image_id(rbd_dev);
6098 ret = rbd_dev_header_name(rbd_dev);
6100 goto err_out_format;
6103 ret = rbd_register_watch(rbd_dev);
6106 pr_info("image %s/%s does not exist\n",
6107 rbd_dev->spec->pool_name,
6108 rbd_dev->spec->image_name);
6109 goto err_out_format;
6113 ret = rbd_dev_header_info(rbd_dev);
6118 * If this image is the one being mapped, we have pool name and
6119 * id, image name and id, and snap name - need to fill snap id.
6120 * Otherwise this is a parent image, identified by pool, image
6121 * and snap ids - need to fill in names for those ids.
6124 ret = rbd_spec_fill_snap_id(rbd_dev);
6126 ret = rbd_spec_fill_names(rbd_dev);
6129 pr_info("snap %s/%s@%s does not exist\n",
6130 rbd_dev->spec->pool_name,
6131 rbd_dev->spec->image_name,
6132 rbd_dev->spec->snap_name);
6136 if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
6137 ret = rbd_dev_v2_parent_info(rbd_dev);
6142 * Need to warn users if this image is the one being
6143 * mapped and has a parent.
6145 if (!depth && rbd_dev->parent_spec)
6147 "WARNING: kernel layering is EXPERIMENTAL!");
6150 ret = rbd_dev_probe_parent(rbd_dev, depth);
6154 dout("discovered format %u image, header name is %s\n",
6155 rbd_dev->image_format, rbd_dev->header_oid.name);
6159 rbd_dev_unprobe(rbd_dev);
6162 rbd_unregister_watch(rbd_dev);
6164 rbd_dev->image_format = 0;
6165 kfree(rbd_dev->spec->image_id);
6166 rbd_dev->spec->image_id = NULL;
6170 static ssize_t do_rbd_add(struct bus_type *bus,
6174 struct rbd_device *rbd_dev = NULL;
6175 struct ceph_options *ceph_opts = NULL;
6176 struct rbd_options *rbd_opts = NULL;
6177 struct rbd_spec *spec = NULL;
6178 struct rbd_client *rbdc;
6182 if (!try_module_get(THIS_MODULE))
6185 /* parse add command */
6186 rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
6190 rbdc = rbd_get_client(ceph_opts);
6197 rc = rbd_add_get_pool_id(rbdc, spec->pool_name);
6200 pr_info("pool %s does not exist\n", spec->pool_name);
6201 goto err_out_client;
6203 spec->pool_id = (u64)rc;
6205 rbd_dev = rbd_dev_create(rbdc, spec, rbd_opts);
6208 goto err_out_client;
6210 rbdc = NULL; /* rbd_dev now owns this */
6211 spec = NULL; /* rbd_dev now owns this */
6212 rbd_opts = NULL; /* rbd_dev now owns this */
6214 rbd_dev->config_info = kstrdup(buf, GFP_KERNEL);
6215 if (!rbd_dev->config_info) {
6217 goto err_out_rbd_dev;
6220 down_write(&rbd_dev->header_rwsem);
6221 rc = rbd_dev_image_probe(rbd_dev, 0);
6223 up_write(&rbd_dev->header_rwsem);
6224 goto err_out_rbd_dev;
6227 /* If we are mapping a snapshot it must be marked read-only */
6229 read_only = rbd_dev->opts->read_only;
6230 if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
6232 rbd_dev->mapping.read_only = read_only;
6234 rc = rbd_dev_device_setup(rbd_dev);
6237 * rbd_unregister_watch() can't be moved into
6238 * rbd_dev_image_release() without refactoring, see
6239 * commit 1f3ef78861ac.
6241 rbd_unregister_watch(rbd_dev);
6242 rbd_dev_image_release(rbd_dev);
6248 module_put(THIS_MODULE);
6252 rbd_dev_destroy(rbd_dev);
6254 rbd_put_client(rbdc);
6261 static ssize_t rbd_add(struct bus_type *bus,
6268 return do_rbd_add(bus, buf, count);
6271 static ssize_t rbd_add_single_major(struct bus_type *bus,
6275 return do_rbd_add(bus, buf, count);
6278 static void rbd_dev_device_release(struct rbd_device *rbd_dev)
6280 rbd_free_disk(rbd_dev);
6282 spin_lock(&rbd_dev_list_lock);
6283 list_del_init(&rbd_dev->node);
6284 spin_unlock(&rbd_dev_list_lock);
6286 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
6287 device_del(&rbd_dev->dev);
6288 rbd_dev_mapping_clear(rbd_dev);
6290 unregister_blkdev(rbd_dev->major, rbd_dev->name);
6293 static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
6295 while (rbd_dev->parent) {
6296 struct rbd_device *first = rbd_dev;
6297 struct rbd_device *second = first->parent;
6298 struct rbd_device *third;
6301 * Follow to the parent with no grandparent and detach it.
6304 while (second && (third = second->parent)) {
6309 rbd_dev_image_release(second);
6310 first->parent = NULL;
6311 first->parent_overlap = 0;
6313 rbd_assert(first->parent_spec);
6314 rbd_spec_put(first->parent_spec);
6315 first->parent_spec = NULL;
6319 static ssize_t do_rbd_remove(struct bus_type *bus,
6323 struct rbd_device *rbd_dev = NULL;
6324 struct list_head *tmp;
6327 bool already = false;
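	/*
	 * Examples (illustrative): "echo 0 > /sys/bus/rbd/remove"
	 * unmaps device id 0; "echo '0 force' > /sys/bus/rbd/remove"
	 * unmaps it even while the device is held open.
	 */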
6333 sscanf(buf, "%d %5s", &dev_id, opt_buf);
6335 pr_err("dev_id out of range\n");
6338 if (opt_buf[0] != '\0') {
6339 if (!strcmp(opt_buf, "force")) {
6342 pr_err("bad remove option at '%s'\n", opt_buf);
6348 spin_lock(&rbd_dev_list_lock);
6349 list_for_each(tmp, &rbd_dev_list) {
6350 rbd_dev = list_entry(tmp, struct rbd_device, node);
6351 if (rbd_dev->dev_id == dev_id) {
6357 spin_lock_irq(&rbd_dev->lock);
6358 if (rbd_dev->open_count && !force)
6361 already = test_and_set_bit(RBD_DEV_FLAG_REMOVING,
6363 spin_unlock_irq(&rbd_dev->lock);
6365 spin_unlock(&rbd_dev_list_lock);
6366 if (ret < 0 || already)
6371 * Prevent new IO from being queued and wait for existing
6372 * IO to complete/fail.
6374 blk_mq_freeze_queue(rbd_dev->disk->queue);
6375 blk_set_queue_dying(rbd_dev->disk->queue);
6378 down_write(&rbd_dev->lock_rwsem);
6379 if (__rbd_is_lock_owner(rbd_dev))
6380 rbd_unlock(rbd_dev);
6381 up_write(&rbd_dev->lock_rwsem);
6382 rbd_unregister_watch(rbd_dev);
6385 * Don't free anything from rbd_dev->disk until after all
6386 * notifies are completely processed. Otherwise
6387 * rbd_bus_del_dev() will race with rbd_watch_cb(), resulting
6388 * in a potential use after free of rbd_dev->disk or rbd_dev.
6390 rbd_dev_device_release(rbd_dev);
6391 rbd_dev_image_release(rbd_dev);
6396 static ssize_t rbd_remove(struct bus_type *bus,
6403 return do_rbd_remove(bus, buf, count);
6406 static ssize_t rbd_remove_single_major(struct bus_type *bus,
6410 return do_rbd_remove(bus, buf, count);
6414 * create control files in sysfs
6417 static int rbd_sysfs_init(void)
6421 ret = device_register(&rbd_root_dev);
6425 ret = bus_register(&rbd_bus_type);
6427 device_unregister(&rbd_root_dev);
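/*
 * Once registration succeeds, the control files appear as
 * /sys/bus/rbd/add and /sys/bus/rbd/remove (plus add_single_major /
 * remove_single_major when single_major is set), and each mapped
 * device gets a directory under /sys/bus/rbd/devices/<dev-id>/.
 */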
6432 static void rbd_sysfs_cleanup(void)
6434 bus_unregister(&rbd_bus_type);
6435 device_unregister(&rbd_root_dev);
6438 static int rbd_slab_init(void)
6440 rbd_assert(!rbd_img_request_cache);
6441 rbd_img_request_cache = KMEM_CACHE(rbd_img_request, 0);
6442 if (!rbd_img_request_cache)
6445 rbd_assert(!rbd_obj_request_cache);
6446 rbd_obj_request_cache = KMEM_CACHE(rbd_obj_request, 0);
6447 if (!rbd_obj_request_cache)
6450 rbd_assert(!rbd_segment_name_cache);
6451 rbd_segment_name_cache = kmem_cache_create("rbd_segment_name",
6452 CEPH_MAX_OID_NAME_LEN + 1, 1, 0, NULL);
6453 if (rbd_segment_name_cache)
6456 kmem_cache_destroy(rbd_obj_request_cache);
6457 rbd_obj_request_cache = NULL;
6459 kmem_cache_destroy(rbd_img_request_cache);
6460 rbd_img_request_cache = NULL;
6465 static void rbd_slab_exit(void)
6467 rbd_assert(rbd_segment_name_cache);
6468 kmem_cache_destroy(rbd_segment_name_cache);
6469 rbd_segment_name_cache = NULL;
6471 rbd_assert(rbd_obj_request_cache);
6472 kmem_cache_destroy(rbd_obj_request_cache);
6473 rbd_obj_request_cache = NULL;
6475 rbd_assert(rbd_img_request_cache);
6476 kmem_cache_destroy(rbd_img_request_cache);
6477 rbd_img_request_cache = NULL;
6480 static int __init rbd_init(void)
6484 if (!libceph_compatible(NULL)) {
6485 rbd_warn(NULL, "libceph incompatibility (quitting)");
6489 rc = rbd_slab_init();
6494 * The number of active work items is limited by the number of
6495 * rbd devices * queue depth, so leave @max_active at default.
6497 rbd_wq = alloc_workqueue(RBD_DRV_NAME, WQ_MEM_RECLAIM, 0);
6504 rbd_major = register_blkdev(0, RBD_DRV_NAME);
6505 if (rbd_major < 0) {
6511 rc = rbd_sysfs_init();
6513 goto err_out_blkdev;
6516 pr_info("loaded (major %d)\n", rbd_major);
6518 pr_info("loaded\n");
6524 unregister_blkdev(rbd_major, RBD_DRV_NAME);
6526 destroy_workqueue(rbd_wq);
6532 static void __exit rbd_exit(void)
6534 ida_destroy(&rbd_dev_id_ida);
6535 rbd_sysfs_cleanup();
6537 unregister_blkdev(rbd_major, RBD_DRV_NAME);
6538 destroy_workqueue(rbd_wq);
6542 module_init(rbd_init);
6543 module_exit(rbd_exit);
6545 MODULE_AUTHOR("Alex Elder <elder@inktank.com>");
6546 MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
6547 MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
6548 /* following authorship retained from original osdblk.c */
6549 MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");
6551 MODULE_DESCRIPTION("RADOS Block Device (RBD) driver");
6552 MODULE_LICENSE("GPL");