/*
   rbd.c -- Export ceph rados objects as a Linux block device

   based on drivers/block/osdblk.c:

   Copyright 2009 Red Hat, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

   For usage instructions, please refer to:

		 Documentation/ABI/testing/sysfs-bus-rbd

 */
#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/cls_lock_client.h>
#include <linux/ceph/decode.h>
#include <linux/parser.h>
#include <linux/bsearch.h>

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/blk-mq.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/workqueue.h>

#include "rbd_types.h"
#define RBD_DEBUG	/* Activate rbd_assert() calls */

/*
 * The basic unit of block I/O is a sector.  It is interpreted in a
 * number of contexts in Linux (blk, bio, genhd), but the default is
 * universally 512 bytes.  These symbols are just slightly more
 * meaningful than the bare numbers they represent.
 */
#define	SECTOR_SHIFT	9
#define	SECTOR_SIZE	(1ULL << SECTOR_SHIFT)
/*
 * Increment the given counter and return its updated value.
 * If the counter is already 0 it will not be incremented.
 * If the counter is already at its maximum value returns
 * -EINVAL without updating it.
 */
static int atomic_inc_return_safe(atomic_t *v)
{
	unsigned int counter;

	counter = (unsigned int)__atomic_add_unless(v, 1, 0);
	if (counter <= (unsigned int)INT_MAX)
		return (int)counter;

	atomic_dec(v);

	return -EINVAL;
}

/* Decrement the counter.  Return the resulting value, or -EINVAL */
static int atomic_dec_return_safe(atomic_t *v)
{
	int counter;

	counter = atomic_dec_return(v);
	if (counter >= 0)
		return counter;

	atomic_inc(v);

	return -EINVAL;
}
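
/*
 * Usage sketch (illustrative only, not part of the driver): these
 * helpers implement a saturating reference count.  A hypothetical
 * caller pairs them the way parent_ref is managed further down:
 *
 *	static atomic_t example_ref = ATOMIC_INIT(1);	// hypothetical
 *
 *	if (atomic_inc_return_safe(&example_ref) > 0) {
 *		// reference taken; drop it when done
 *		if (atomic_dec_return_safe(&example_ref) < 0)
 *			pr_warn("example_ref underflow\n");
 *	}
 *
 * A counter that has dropped to 0 stays at 0, and an increment past
 * INT_MAX reports -EINVAL instead of wrapping.
 */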
#define RBD_DRV_NAME "rbd"

#define RBD_MINORS_PER_MAJOR		256
#define RBD_SINGLE_MAJOR_PART_SHIFT	4

#define RBD_MAX_PARENT_CHAIN_LEN	16

#define RBD_SNAP_DEV_NAME_PREFIX	"snap_"
#define RBD_MAX_SNAP_NAME_LEN	\
			(NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))

#define RBD_MAX_SNAP_COUNT	510	/* allows max snapc to fit in 4KB */

#define RBD_SNAP_HEAD_NAME	"-"

#define	BAD_SNAP_INDEX	U32_MAX		/* invalid index into snap array */

/* This allows a single page to hold an image name sent by OSD */
#define RBD_IMAGE_NAME_LEN_MAX	(PAGE_SIZE - sizeof (__le32) - 1)
#define RBD_IMAGE_ID_LEN_MAX	64

#define RBD_OBJ_PREFIX_LEN_MAX	64

#define RBD_NOTIFY_TIMEOUT	5	/* seconds */
#define RBD_RETRY_DELAY		msecs_to_jiffies(1000)
#define RBD_FEATURE_LAYERING		(1<<0)
#define RBD_FEATURE_STRIPINGV2		(1<<1)
#define RBD_FEATURE_EXCLUSIVE_LOCK	(1<<2)
#define RBD_FEATURES_ALL		(RBD_FEATURE_LAYERING |		\
					 RBD_FEATURE_STRIPINGV2 |	\
					 RBD_FEATURE_EXCLUSIVE_LOCK)

/* Features supported by this (client software) implementation. */

#define RBD_FEATURES_SUPPORTED	(RBD_FEATURES_ALL)
/*
 * An RBD device name will be "rbd#", where the "rbd" comes from
 * RBD_DRV_NAME above, and # is a unique integer identifier.
 */
#define DEV_NAME_LEN		32
/*
 * block device image metadata (in-memory version)
 */
struct rbd_image_header {
	/* These six fields never change for a given rbd image */
	char *object_prefix;
	__u8 obj_order;
	__u8 crypt_type;
	__u8 comp_type;
	u64 stripe_unit;
	u64 stripe_count;
	u64 features;		/* Might be changeable someday? */

	/* The remaining fields need to be updated occasionally */
	u64 image_size;
	struct ceph_snap_context *snapc;
	char *snap_names;	/* format 1 only */
	u64 *snap_sizes;	/* format 1 only */
};
/*
 * An rbd image specification.
 *
 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
 * identify an image.  Each rbd_dev structure includes a pointer to
 * an rbd_spec structure that encapsulates this identity.
 *
 * Each of the id's in an rbd_spec has an associated name.  For a
 * user-mapped image, the names are supplied and the id's associated
 * with them are looked up.  For a layered image, a parent image is
 * defined by the tuple, and the names are looked up.
 *
 * An rbd_dev structure contains a parent_spec pointer which is
 * non-null if the image it represents is a child in a layered
 * image.  This pointer will refer to the rbd_spec structure used
 * by the parent rbd_dev for its own identity (i.e., the structure
 * is shared between the parent and child).
 *
 * Since these structures are populated once, during the discovery
 * phase of image construction, they are effectively immutable so
 * we make no effort to synchronize access to them.
 *
 * Note that code herein does not assume the image name is known (it
 * could be a null pointer).
 */
struct rbd_spec {
	u64		pool_id;
	const char	*pool_name;

	const char	*image_id;
	const char	*image_name;

	u64		snap_id;
	const char	*snap_name;

	struct kref	kref;
};
/*
 * an instance of the client.  multiple devices may share an rbd client.
 */
struct rbd_client {
	struct ceph_client	*client;
	struct kref		kref;
	struct list_head	node;
};
struct rbd_img_request;
typedef void (*rbd_img_callback_t)(struct rbd_img_request *);

#define	BAD_WHICH	U32_MAX		/* Good which or bad which, which? */

struct rbd_obj_request;
typedef void (*rbd_obj_callback_t)(struct rbd_obj_request *);

enum obj_request_type {
	OBJ_REQUEST_NODATA, OBJ_REQUEST_BIO, OBJ_REQUEST_PAGES
};

enum obj_operation_type {
	OBJ_OP_WRITE,
	OBJ_OP_READ,
	OBJ_OP_DISCARD,
};
enum obj_req_flags {
	OBJ_REQ_DONE,		/* completion flag: not done = 0, done = 1 */
	OBJ_REQ_IMG_DATA,	/* object usage: standalone = 0, image = 1 */
	OBJ_REQ_KNOWN,		/* EXISTS flag valid: no = 0, yes = 1 */
	OBJ_REQ_EXISTS,		/* target exists: no = 0, yes = 1 */
};
struct rbd_obj_request {
	const char		*object_name;
	u64			offset;		/* object start byte */
	u64			length;		/* bytes from offset */
	unsigned long		flags;

	/*
	 * An object request associated with an image will have its
	 * img_data flag set; a standalone object request will not.
	 *
	 * A standalone object request will have which == BAD_WHICH
	 * and a null obj_request pointer.
	 *
	 * An object request initiated in support of a layered image
	 * object (to check for its existence before a write) will
	 * have which == BAD_WHICH and a non-null obj_request pointer.
	 *
	 * Finally, an object request for rbd image data will have
	 * which != BAD_WHICH, and will have a non-null img_request
	 * pointer.  The value of which will be in the range
	 * 0..(img_request->obj_request_count-1).
	 */
	union {
		struct rbd_obj_request	*obj_request;	/* STAT op */
		struct {
			struct rbd_img_request	*img_request;
			u64			img_offset;
			/* links for img_request->obj_requests list */
			struct list_head	links;
		};
	};
	u32			which;		/* posn image request list */

	enum obj_request_type	type;
	union {
		struct bio	*bio_list;
		struct {
			struct page	**pages;
			u32		page_count;
		};
	};
	struct page		**copyup_pages;
	u32			copyup_page_count;

	struct ceph_osd_request	*osd_req;

	u64			xferred;	/* bytes transferred */
	int			result;

	rbd_obj_callback_t	callback;
	struct completion	completion;

	struct kref		kref;
};
enum img_req_flags {
	IMG_REQ_WRITE,		/* I/O direction: read = 0, write = 1 */
	IMG_REQ_CHILD,		/* initiator: block = 0, child image = 1 */
	IMG_REQ_LAYERED,	/* ENOENT handling: normal = 0, layered = 1 */
	IMG_REQ_DISCARD,	/* discard: normal = 0, discard request = 1 */
};
struct rbd_img_request {
	struct rbd_device	*rbd_dev;
	u64			offset;	/* starting image byte offset */
	u64			length;	/* byte count from offset */
	unsigned long		flags;
	union {
		u64			snap_id;	/* for reads */
		struct ceph_snap_context *snapc;	/* for writes */
	};
	union {
		struct request		*rq;		/* block request */
		struct rbd_obj_request	*obj_request;	/* obj req initiator */
	};
	struct page		**copyup_pages;
	u32			copyup_page_count;
	spinlock_t		completion_lock;/* protects next_completion */
	u32			next_completion;
	rbd_img_callback_t	callback;
	u64			xferred;/* aggregate bytes transferred */
	int			result;	/* first nonzero obj_request result */

	u32			obj_request_count;
	struct list_head	obj_requests;	/* rbd_obj_request structs */

	struct kref		kref;
};
#define for_each_obj_request(ireq, oreq) \
	list_for_each_entry(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_from(ireq, oreq) \
	list_for_each_entry_from(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_safe(ireq, oreq, n) \
	list_for_each_entry_safe_reverse(oreq, n, &(ireq)->obj_requests, links)
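
/*
 * Usage sketch for the iterators above (illustrative only): walking an
 * image request's object requests, e.g. to sum transferred bytes the
 * way rbd_img_request_complete() does below:
 *
 *	struct rbd_obj_request *obj_request;
 *	u64 xferred = 0;
 *
 *	for_each_obj_request(img_request, obj_request)
 *		xferred += obj_request->xferred;
 *
 * The _safe variant must be used when entries are deleted while
 * iterating, as rbd_img_request_destroy() does.
 */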
enum rbd_watch_state {
	RBD_WATCH_STATE_UNREGISTERED,
	RBD_WATCH_STATE_REGISTERED,
	RBD_WATCH_STATE_ERROR,
};

enum rbd_lock_state {
	RBD_LOCK_STATE_UNLOCKED,
	RBD_LOCK_STATE_LOCKED,
	RBD_LOCK_STATE_RELEASING,
};
/* WatchNotify::ClientId */
struct rbd_client_id {
	u64 gid;
	u64 handle;
};
struct rbd_mapping {
	u64			size;
	u64			features;
	bool			read_only;
};

struct rbd_device {
	int			dev_id;		/* blkdev unique id */

	int			major;		/* blkdev assigned major */
	int			minor;
	struct gendisk		*disk;		/* blkdev's gendisk and rq */

	u32			image_format;	/* Either 1 or 2 */
	struct rbd_client	*rbd_client;

	char			name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */

	spinlock_t		lock;		/* queue, flags, open_count */

	struct rbd_image_header	header;
	unsigned long		flags;		/* possibly lock protected */
	struct rbd_spec		*spec;
	struct rbd_options	*opts;
	char			*config_info;	/* add{,_single_major} string */

	struct ceph_object_id	header_oid;
	struct ceph_object_locator header_oloc;

	struct ceph_file_layout	layout;		/* used for all rbd requests */

	struct mutex		watch_mutex;
	enum rbd_watch_state	watch_state;
	struct ceph_osd_linger_request *watch_handle;
	struct delayed_work	watch_dwork;

	struct rw_semaphore	lock_rwsem;
	enum rbd_lock_state	lock_state;
	struct rbd_client_id	owner_cid;
	struct work_struct	acquired_lock_work;
	struct work_struct	released_lock_work;
	struct delayed_work	lock_dwork;
	struct work_struct	unlock_work;
	wait_queue_head_t	lock_waitq;

	struct workqueue_struct	*task_wq;

	struct rbd_spec		*parent_spec;
	u64			parent_overlap;
	atomic_t		parent_ref;
	struct rbd_device	*parent;

	/* Block layer tags. */
	struct blk_mq_tag_set	tag_set;

	/* protects updating the header */
	struct rw_semaphore	header_rwsem;

	struct rbd_mapping	mapping;

	struct list_head	node;

	/* sysfs related */
	struct device		dev;
	unsigned long		open_count;	/* protected by lock */
};
/*
 * Flag bits for rbd_dev->flags.  If atomicity is required,
 * rbd_dev->lock is used to protect access.
 *
 * Currently, only the "removing" flag (which is coupled with the
 * "open_count" field) requires atomic access.
 */
enum rbd_dev_flags {
	RBD_DEV_FLAG_EXISTS,	/* mapped snapshot has not been deleted */
	RBD_DEV_FLAG_REMOVING,	/* this mapping is being removed */
};
static DEFINE_MUTEX(client_mutex);	/* Serialize client creation */

static LIST_HEAD(rbd_dev_list);		/* devices */
static DEFINE_SPINLOCK(rbd_dev_list_lock);

static LIST_HEAD(rbd_client_list);	/* clients */
static DEFINE_SPINLOCK(rbd_client_list_lock);

/* Slab caches for frequently-allocated structures */

static struct kmem_cache	*rbd_img_request_cache;
static struct kmem_cache	*rbd_obj_request_cache;
static struct kmem_cache	*rbd_segment_name_cache;

static int rbd_major;
static DEFINE_IDA(rbd_dev_id_ida);

static struct workqueue_struct *rbd_wq;
/*
 * Default to false for now, as single-major requires >= 0.75 version of
 * userspace rbd utility.
 */
static bool single_major = false;
module_param(single_major, bool, S_IRUGO);
MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: false)");
static int rbd_img_request_submit(struct rbd_img_request *img_request);

static ssize_t rbd_add(struct bus_type *bus, const char *buf,
		       size_t count);
static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
			  size_t count);
static ssize_t rbd_add_single_major(struct bus_type *bus, const char *buf,
				    size_t count);
static ssize_t rbd_remove_single_major(struct bus_type *bus, const char *buf,
				       size_t count);
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth);
static void rbd_spec_put(struct rbd_spec *spec);
static int rbd_dev_id_to_minor(int dev_id)
{
	return dev_id << RBD_SINGLE_MAJOR_PART_SHIFT;
}

static int minor_to_rbd_dev_id(int minor)
{
	return minor >> RBD_SINGLE_MAJOR_PART_SHIFT;
}
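
/*
 * Worked example (illustrative): with RBD_SINGLE_MAJOR_PART_SHIFT == 4,
 * each device owns 16 minors (one whole-device node plus 15 partition
 * nodes).  Device id 3 maps to minor 3 << 4 == 48, and any of minors
 * 48..63 maps back to device id 3, since the low 4 bits select the
 * partition.
 */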
static bool rbd_is_lock_supported(struct rbd_device *rbd_dev)
{
	return (rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK) &&
	       rbd_dev->spec->snap_id == CEPH_NOSNAP &&
	       !rbd_dev->mapping.read_only;
}

static bool __rbd_is_lock_owner(struct rbd_device *rbd_dev)
{
	return rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED ||
	       rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING;
}

static bool rbd_is_lock_owner(struct rbd_device *rbd_dev)
{
	bool is_lock_owner;

	down_read(&rbd_dev->lock_rwsem);
	is_lock_owner = __rbd_is_lock_owner(rbd_dev);
	up_read(&rbd_dev->lock_rwsem);
	return is_lock_owner;
}
static BUS_ATTR(add, S_IWUSR, NULL, rbd_add);
static BUS_ATTR(remove, S_IWUSR, NULL, rbd_remove);
static BUS_ATTR(add_single_major, S_IWUSR, NULL, rbd_add_single_major);
static BUS_ATTR(remove_single_major, S_IWUSR, NULL, rbd_remove_single_major);

static struct attribute *rbd_bus_attrs[] = {
	&bus_attr_add.attr,
	&bus_attr_remove.attr,
	&bus_attr_add_single_major.attr,
	&bus_attr_remove_single_major.attr,
	NULL,
};

static umode_t rbd_bus_is_visible(struct kobject *kobj,
				  struct attribute *attr, int index)
{
	if (!single_major &&
	    (attr == &bus_attr_add_single_major.attr ||
	     attr == &bus_attr_remove_single_major.attr))
		return 0;

	return attr->mode;
}

static const struct attribute_group rbd_bus_group = {
	.attrs = rbd_bus_attrs,
	.is_visible = rbd_bus_is_visible,
};
__ATTRIBUTE_GROUPS(rbd_bus);

static struct bus_type rbd_bus_type = {
	.name		= "rbd",
	.bus_groups	= rbd_bus_groups,
};

static void rbd_root_dev_release(struct device *dev)
{
}

static struct device rbd_root_dev = {
	.init_name =    "rbd",
	.release =      rbd_root_dev_release,
};
static __printf(2, 3)
void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	if (!rbd_dev)
		printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
	else if (rbd_dev->disk)
		printk(KERN_WARNING "%s: %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_name)
		printk(KERN_WARNING "%s: image %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_id)
		printk(KERN_WARNING "%s: id %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
	else	/* punt */
		printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
			RBD_DRV_NAME, rbd_dev, &vaf);
	va_end(args);
}
#ifdef RBD_DEBUG
#define rbd_assert(expr)						\
		if (unlikely(!(expr))) {				\
			printk(KERN_ERR "\nAssertion failure in %s() "	\
						"at line %d:\n\n"	\
					"\trbd_assert(%s);\n\n",	\
					__func__, __LINE__, #expr);	\
			BUG();						\
		}
#else /* !RBD_DEBUG */
#  define rbd_assert(expr)	((void) 0)
#endif /* !RBD_DEBUG */
static void rbd_osd_copyup_callback(struct rbd_obj_request *obj_request);
static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);

static int rbd_dev_refresh(struct rbd_device *rbd_dev);
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
static int rbd_dev_header_info(struct rbd_device *rbd_dev);
static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev);
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id);
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
		u8 *order, u64 *snap_size);
static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
		u64 *snap_features);
static int rbd_open(struct block_device *bdev, fmode_t mode)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	bool removing = false;

	if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)
		return -EROFS;

	spin_lock_irq(&rbd_dev->lock);
	if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
		removing = true;
	else
		rbd_dev->open_count++;
	spin_unlock_irq(&rbd_dev->lock);
	if (removing)
		return -ENOENT;

	(void) get_device(&rbd_dev->dev);

	return 0;
}
static void rbd_release(struct gendisk *disk, fmode_t mode)
{
	struct rbd_device *rbd_dev = disk->private_data;
	unsigned long open_count_before;

	spin_lock_irq(&rbd_dev->lock);
	open_count_before = rbd_dev->open_count--;
	spin_unlock_irq(&rbd_dev->lock);
	rbd_assert(open_count_before > 0);

	put_device(&rbd_dev->dev);
}
static int rbd_ioctl_set_ro(struct rbd_device *rbd_dev, unsigned long arg)
{
	int ret = 0;
	int val;
	bool ro;
	bool ro_changed = false;

	/* get_user() may sleep, so call it before taking rbd_dev->lock */
	if (get_user(val, (int __user *)(arg)))
		return -EFAULT;

	ro = val ? true : false;
	/* Snapshots can't be written to */
	if (rbd_dev->spec->snap_id != CEPH_NOSNAP && !ro)
		return -EROFS;

	spin_lock_irq(&rbd_dev->lock);
	/* prevent others from opening this device */
	if (rbd_dev->open_count > 1) {
		ret = -EBUSY;
		goto out;
	}

	if (rbd_dev->mapping.read_only != ro) {
		rbd_dev->mapping.read_only = ro;
		ro_changed = true;
	}

out:
	spin_unlock_irq(&rbd_dev->lock);
	/* set_disk_ro() may sleep, so call it after releasing rbd_dev->lock */
	if (ret == 0 && ro_changed)
		set_disk_ro(rbd_dev->disk, ro ? 1 : 0);

	return ret;
}
static int rbd_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	int ret;

	switch (cmd) {
	case BLKROSET:
		ret = rbd_ioctl_set_ro(rbd_dev, arg);
		break;
	default:
		ret = -ENOTTY;
	}

	return ret;
}

#ifdef CONFIG_COMPAT
static int rbd_compat_ioctl(struct block_device *bdev, fmode_t mode,
				unsigned int cmd, unsigned long arg)
{
	return rbd_ioctl(bdev, mode, cmd, arg);
}
#endif /* CONFIG_COMPAT */

static const struct block_device_operations rbd_bd_ops = {
	.owner			= THIS_MODULE,
	.open			= rbd_open,
	.release		= rbd_release,
	.ioctl			= rbd_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= rbd_compat_ioctl,
#endif
};
/*
 * Initialize an rbd client instance.  Success or not, this function
 * consumes ceph_opts.  Caller holds client_mutex.
 */
static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;
	int ret = -ENOMEM;

	dout("%s:\n", __func__);
	rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
	if (!rbdc)
		goto out_opt;

	kref_init(&rbdc->kref);
	INIT_LIST_HEAD(&rbdc->node);

	rbdc->client = ceph_create_client(ceph_opts, rbdc, 0, 0);
	if (IS_ERR(rbdc->client))
		goto out_rbdc;
	ceph_opts = NULL; /* Now rbdc->client is responsible for ceph_opts */

	ret = ceph_open_session(rbdc->client);
	if (ret < 0)
		goto out_client;

	spin_lock(&rbd_client_list_lock);
	list_add_tail(&rbdc->node, &rbd_client_list);
	spin_unlock(&rbd_client_list_lock);

	dout("%s: rbdc %p\n", __func__, rbdc);

	return rbdc;
out_client:
	ceph_destroy_client(rbdc->client);
out_rbdc:
	kfree(rbdc);
out_opt:
	if (ceph_opts)
		ceph_destroy_options(ceph_opts);
	dout("%s: error %d\n", __func__, ret);

	return ERR_PTR(ret);
}
static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
{
	kref_get(&rbdc->kref);

	return rbdc;
}
/*
 * Find a ceph client with specific addr and configuration.  If
 * found, bump its reference count.
 */
static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
{
	struct rbd_client *client_node;
	bool found = false;

	if (ceph_opts->flags & CEPH_OPT_NOSHARE)
		return NULL;

	spin_lock(&rbd_client_list_lock);
	list_for_each_entry(client_node, &rbd_client_list, node) {
		if (!ceph_compare_options(ceph_opts, client_node->client)) {
			__rbd_get_client(client_node);

			found = true;
			break;
		}
	}
	spin_unlock(&rbd_client_list_lock);

	return found ? client_node : NULL;
}
/*
 * (Per device) rbd map options
 */
enum {
	Opt_queue_depth,
	Opt_last_int,
	/* int args above */
	Opt_last_string,
	/* string args above */
	Opt_read_only,
	Opt_read_write,
	Opt_lock_on_read,
	Opt_err
};

static match_table_t rbd_opts_tokens = {
	{Opt_queue_depth, "queue_depth=%d"},
	/* int args above */
	/* string args above */
	{Opt_read_only, "read_only"},
	{Opt_read_only, "ro"},		/* Alternate spelling */
	{Opt_read_write, "read_write"},
	{Opt_read_write, "rw"},		/* Alternate spelling */
	{Opt_lock_on_read, "lock_on_read"},
	{Opt_err, NULL}
};

struct rbd_options {
	int	queue_depth;
	bool	read_only;
	bool	lock_on_read;
};

#define RBD_QUEUE_DEPTH_DEFAULT	BLKDEV_MAX_RQ
#define RBD_READ_ONLY_DEFAULT	false
#define RBD_LOCK_ON_READ_DEFAULT false
static int parse_rbd_opts_token(char *c, void *private)
{
	struct rbd_options *rbd_opts = private;
	substring_t argstr[MAX_OPT_ARGS];
	int token, intval, ret;

	token = match_token(c, rbd_opts_tokens, argstr);
	if (token < Opt_last_int) {
		ret = match_int(&argstr[0], &intval);
		if (ret < 0) {
			pr_err("bad mount option arg (not int) at '%s'\n", c);
			return ret;
		}
		dout("got int token %d val %d\n", token, intval);
	} else if (token > Opt_last_int && token < Opt_last_string) {
		dout("got string token %d val %s\n", token, argstr[0].from);
	} else {
		dout("got token %d\n", token);
	}

	switch (token) {
	case Opt_queue_depth:
		if (intval < 1) {
			pr_err("queue_depth out of range\n");
			return -EINVAL;
		}
		rbd_opts->queue_depth = intval;
		break;
	case Opt_read_only:
		rbd_opts->read_only = true;
		break;
	case Opt_read_write:
		rbd_opts->read_only = false;
		break;
	case Opt_lock_on_read:
		rbd_opts->lock_on_read = true;
		break;
	default:
		/* libceph prints "bad option" msg */
		return -EINVAL;
	}

	return 0;
}
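
/*
 * Example (illustrative): the options string supplied at map time is a
 * comma-separated list whose tokens are each handed to the function
 * above, e.g. "queue_depth=128,lock_on_read,ro" sets queue_depth to
 * 128 and both boolean options to true.  Unrecognized tokens fall
 * through to the default case and fail the map with -EINVAL.
 */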
static char* obj_op_name(enum obj_operation_type op_type)
{
	switch (op_type) {
	case OBJ_OP_READ:
		return "read";
	case OBJ_OP_WRITE:
		return "write";
	case OBJ_OP_DISCARD:
		return "discard";
	default:
		return "???";
	}
}
/*
 * Get a ceph client with specific addr and configuration, if one does
 * not exist create it.  Either way, ceph_opts is consumed by this
 * function.
 */
static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;

	mutex_lock_nested(&client_mutex, SINGLE_DEPTH_NESTING);
	rbdc = rbd_client_find(ceph_opts);
	if (rbdc)	/* using an existing client */
		ceph_destroy_options(ceph_opts);
	else
		rbdc = rbd_client_create(ceph_opts);
	mutex_unlock(&client_mutex);

	return rbdc;
}
/*
 * Destroy ceph client.  Takes rbd_client_list_lock itself, so the
 * caller must not hold it.
 */
static void rbd_client_release(struct kref *kref)
{
	struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);

	dout("%s: rbdc %p\n", __func__, rbdc);
	spin_lock(&rbd_client_list_lock);
	list_del(&rbdc->node);
	spin_unlock(&rbd_client_list_lock);

	ceph_destroy_client(rbdc->client);
	kfree(rbdc);
}
/*
 * Drop reference to ceph client node. If it's not referenced anymore, release
 * it.
 */
static void rbd_put_client(struct rbd_client *rbdc)
{
	if (rbdc)
		kref_put(&rbdc->kref, rbd_client_release);
}
static bool rbd_image_format_valid(u32 image_format)
{
	return image_format == 1 || image_format == 2;
}
static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
{
	size_t size;
	u32 snap_count;

	/* The header has to start with the magic rbd header text */
	if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
		return false;

	/* The bio layer requires at least sector-sized I/O */

	if (ondisk->options.order < SECTOR_SHIFT)
		return false;

	/* If we use u64 in a few spots we may be able to loosen this */

	if (ondisk->options.order > 8 * sizeof (int) - 1)
		return false;

	/*
	 * The size of a snapshot header has to fit in a size_t, and
	 * that limits the number of snapshots.
	 */
	snap_count = le32_to_cpu(ondisk->snap_count);
	size = SIZE_MAX - sizeof (struct ceph_snap_context);
	if (snap_count > size / sizeof (__le64))
		return false;

	/*
	 * Not only that, but the size of the entire snapshot
	 * header must also be representable in a size_t.
	 */
	size -= snap_count * sizeof (__le64);
	if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
		return false;

	return true;
}
/*
 * Fill an rbd image header with information from the given format 1
 * on-disk header.
 */
static int rbd_header_from_disk(struct rbd_device *rbd_dev,
				 struct rbd_image_header_ondisk *ondisk)
{
	struct rbd_image_header *header = &rbd_dev->header;
	bool first_time = header->object_prefix == NULL;
	struct ceph_snap_context *snapc;
	char *object_prefix = NULL;
	char *snap_names = NULL;
	u64 *snap_sizes = NULL;
	u32 snap_count;
	size_t size;
	int ret = -ENOMEM;
	u32 i;

	/* Allocate this now to avoid having to handle failure below */

	if (first_time) {
		size_t len;

		len = strnlen(ondisk->object_prefix,
				sizeof (ondisk->object_prefix));
		object_prefix = kmalloc(len + 1, GFP_KERNEL);
		if (!object_prefix)
			return -ENOMEM;
		memcpy(object_prefix, ondisk->object_prefix, len);
		object_prefix[len] = '\0';
	}

	/* Allocate the snapshot context and fill it in */

	snap_count = le32_to_cpu(ondisk->snap_count);
	snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
	if (!snapc)
		goto out_err;
	snapc->seq = le64_to_cpu(ondisk->snap_seq);
	if (snap_count) {
		struct rbd_image_snap_ondisk *snaps;
		u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);

		/* We'll keep a copy of the snapshot names... */

		if (snap_names_len > (u64)SIZE_MAX)
			goto out_2big;
		snap_names = kmalloc(snap_names_len, GFP_KERNEL);
		if (!snap_names)
			goto out_err;

		/* ...as well as the array of their sizes. */

		size = snap_count * sizeof (*header->snap_sizes);
		snap_sizes = kmalloc(size, GFP_KERNEL);
		if (!snap_sizes)
			goto out_err;

		/*
		 * Copy the names, and fill in each snapshot's id
		 * and size.
		 *
		 * Note that rbd_dev_v1_header_info() guarantees the
		 * ondisk buffer we're working with has
		 * snap_names_len bytes beyond the end of the
		 * snapshot id array, so this memcpy() is safe.
		 */
		memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
		snaps = ondisk->snaps;
		for (i = 0; i < snap_count; i++) {
			snapc->snaps[i] = le64_to_cpu(snaps[i].id);
			snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
		}
	}

	/* We won't fail any more, fill in the header */

	if (first_time) {
		header->object_prefix = object_prefix;
		header->obj_order = ondisk->options.order;
		header->crypt_type = ondisk->options.crypt_type;
		header->comp_type = ondisk->options.comp_type;
		/* The rest aren't used for format 1 images */
		header->stripe_unit = 0;
		header->stripe_count = 0;
		header->features = 0;
	} else {
		ceph_put_snap_context(header->snapc);
		kfree(header->snap_names);
		kfree(header->snap_sizes);
	}

	/* The remaining fields always get updated (when we refresh) */

	header->image_size = le64_to_cpu(ondisk->image_size);
	header->snapc = snapc;
	header->snap_names = snap_names;
	header->snap_sizes = snap_sizes;

	return 0;
out_2big:
	ret = -EIO;
out_err:
	kfree(snap_sizes);
	kfree(snap_names);
	ceph_put_snap_context(snapc);
	kfree(object_prefix);

	return ret;
}
static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
{
	const char *snap_name;

	rbd_assert(which < rbd_dev->header.snapc->num_snaps);

	/* Skip over names until we find the one we are looking for */

	snap_name = rbd_dev->header.snap_names;
	while (which--)
		snap_name += strlen(snap_name) + 1;

	return kstrdup(snap_name, GFP_KERNEL);
}
/*
 * Snapshot id comparison function for use with qsort()/bsearch().
 * Note that result is for snapshots in *descending* order.
 */
static int snapid_compare_reverse(const void *s1, const void *s2)
{
	u64 snap_id1 = *(u64 *)s1;
	u64 snap_id2 = *(u64 *)s2;

	if (snap_id1 < snap_id2)
		return 1;
	return snap_id1 == snap_id2 ? 0 : -1;
}
/*
 * Search a snapshot context to see if the given snapshot id is
 * present.
 *
 * Returns the position of the snapshot id in the array if it's found,
 * or BAD_SNAP_INDEX otherwise.
 *
 * Note: The snapshot array is kept sorted (by the osd) in
 * reverse order, highest snapshot id first.
 */
static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	u64 *found;

	found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
				sizeof (snap_id), snapid_compare_reverse);

	return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
}
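
/*
 * Worked example (illustrative): because the snapshot array is sorted
 * in descending order, snapid_compare_reverse() inverts the usual
 * comparison so that bsearch() still converges.  For
 * snapc->snaps[] = { 30, 20, 10 }, looking up snap_id 20 returns
 * &snaps[1], so rbd_dev_snap_index() yields 1; looking up 25 finds
 * nothing and yields BAD_SNAP_INDEX.
 */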
static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id)
{
	u32 which;
	const char *snap_name;

	which = rbd_dev_snap_index(rbd_dev, snap_id);
	if (which == BAD_SNAP_INDEX)
		return ERR_PTR(-ENOENT);

	snap_name = _rbd_dev_v1_snap_name(rbd_dev, which);
	return snap_name ? snap_name : ERR_PTR(-ENOMEM);
}
static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
{
	if (snap_id == CEPH_NOSNAP)
		return RBD_SNAP_HEAD_NAME;

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (rbd_dev->image_format == 1)
		return rbd_dev_v1_snap_name(rbd_dev, snap_id);

	return rbd_dev_v2_snap_name(rbd_dev, snap_id);
}
static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u64 *snap_size)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_size = rbd_dev->header.image_size;
	} else if (rbd_dev->image_format == 1) {
		u32 which;

		which = rbd_dev_snap_index(rbd_dev, snap_id);
		if (which == BAD_SNAP_INDEX)
			return -ENOENT;

		*snap_size = rbd_dev->header.snap_sizes[which];
	} else {
		u64 size = 0;
		int ret;

		ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
		if (ret)
			return ret;

		*snap_size = size;
	}
	return 0;
}
static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
			u64 *snap_features)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_features = rbd_dev->header.features;
	} else if (rbd_dev->image_format == 1) {
		*snap_features = 0;	/* No features for format 1 */
	} else {
		u64 features = 0;
		int ret;

		ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, &features);
		if (ret)
			return ret;

		*snap_features = features;
	}
	return 0;
}
static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
{
	u64 snap_id = rbd_dev->spec->snap_id;
	u64 size = 0;
	u64 features = 0;
	int ret;

	ret = rbd_snap_size(rbd_dev, snap_id, &size);
	if (ret)
		return ret;
	ret = rbd_snap_features(rbd_dev, snap_id, &features);
	if (ret)
		return ret;

	rbd_dev->mapping.size = size;
	rbd_dev->mapping.features = features;

	return 0;
}

static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
{
	rbd_dev->mapping.size = 0;
	rbd_dev->mapping.features = 0;
}
static void rbd_segment_name_free(const char *name)
{
	/* The explicit cast here is needed to drop the const qualifier */

	kmem_cache_free(rbd_segment_name_cache, (void *)name);
}

static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
{
	char *name;
	u64 segment;
	int ret;
	char *name_format;

	name = kmem_cache_alloc(rbd_segment_name_cache, GFP_NOIO);
	if (!name)
		return NULL;
	segment = offset >> rbd_dev->header.obj_order;
	name_format = "%s.%012llx";
	if (rbd_dev->image_format == 2)
		name_format = "%s.%016llx";
	ret = snprintf(name, CEPH_MAX_OID_NAME_LEN + 1, name_format,
			rbd_dev->header.object_prefix, segment);
	if (ret < 0 || ret > CEPH_MAX_OID_NAME_LEN) {
		pr_err("error formatting segment name for #%llu (%d)\n",
			segment, ret);
		rbd_segment_name_free(name);
		name = NULL;
	}

	return name;
}
static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset)
{
	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

	return offset & (segment_size - 1);
}

static u64 rbd_segment_length(struct rbd_device *rbd_dev,
				u64 offset, u64 length)
{
	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

	offset &= segment_size - 1;

	rbd_assert(length <= U64_MAX - offset);
	if (offset + length > segment_size)
		length = segment_size - offset;

	return length;
}
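
/*
 * Worked example (illustrative): with a typical order of 22 the
 * segment size is 4 MiB (1 << 22).  For an image byte range starting
 * at offset 0x401000 with length 0x800000:
 *
 *	rbd_segment_offset(rbd_dev, 0x401000) == 0x1000
 *	rbd_segment_length(rbd_dev, 0x401000, 0x800000) == 0x3ff000
 *
 * i.e. the first object request covers only the remainder of the
 * segment containing the starting offset; the caller issues further
 * requests for the rest of the range.
 */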
/*
 * returns the size of an object in the image
 */
static u64 rbd_obj_bytes(struct rbd_image_header *header)
{
	return 1 << header->obj_order;
}

static void bio_chain_put(struct bio *chain)
{
	struct bio *tmp;

	while (chain) {
		tmp = chain;
		chain = chain->bi_next;
		bio_put(tmp);
	}
}
/*
 * zeros a bio chain, starting at specific offset
 */
static void zero_bio_chain(struct bio *chain, int start_ofs)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	unsigned long flags;
	void *buf;
	int pos = 0;

	while (chain) {
		bio_for_each_segment(bv, chain, iter) {
			if (pos + bv.bv_len > start_ofs) {
				int remainder = max(start_ofs - pos, 0);
				buf = bvec_kmap_irq(&bv, &flags);
				memset(buf + remainder, 0,
				       bv.bv_len - remainder);
				flush_dcache_page(bv.bv_page);
				bvec_kunmap_irq(buf, &flags);
			}
			pos += bv.bv_len;
		}

		chain = chain->bi_next;
	}
}
/*
 * similar to zero_bio_chain(), zeros data defined by a page array,
 * starting at the given byte offset from the start of the array and
 * continuing up to the given end offset.  The pages array is
 * assumed to be big enough to hold all bytes up to the end.
 */
static void zero_pages(struct page **pages, u64 offset, u64 end)
{
	struct page **page = &pages[offset >> PAGE_SHIFT];

	rbd_assert(end > offset);
	rbd_assert(end - offset <= (u64)SIZE_MAX);
	while (offset < end) {
		size_t page_offset;
		size_t length;
		unsigned long flags;
		void *kaddr;

		page_offset = offset & ~PAGE_MASK;
		length = min_t(size_t, PAGE_SIZE - page_offset, end - offset);
		local_irq_save(flags);
		kaddr = kmap_atomic(*page);
		memset(kaddr + page_offset, 0, length);
		flush_dcache_page(*page);
		kunmap_atomic(kaddr);
		local_irq_restore(flags);

		offset += length;
		page++;
	}
}
/*
 * Clone a portion of a bio, starting at the given byte offset
 * and continuing for the number of bytes indicated.
 */
static struct bio *bio_clone_range(struct bio *bio_src,
					unsigned int offset,
					unsigned int len,
					gfp_t gfpmask)
{
	struct bio *bio;

	bio = bio_clone(bio_src, gfpmask);
	if (!bio)
		return NULL;	/* ENOMEM */

	bio_advance(bio, offset);
	bio->bi_iter.bi_size = len;

	return bio;
}
/*
 * Clone a portion of a bio chain, starting at the given byte offset
 * into the first bio in the source chain and continuing for the
 * number of bytes indicated.  The result is another bio chain of
 * exactly the given length, or a null pointer on error.
 *
 * The bio_src and offset parameters are both in-out.  On entry they
 * refer to the first source bio and the offset into that bio where
 * the start of data to be cloned is located.
 *
 * On return, bio_src is updated to refer to the bio in the source
 * chain that contains first un-cloned byte, and *offset will
 * contain the offset of that byte within that bio.
 */
static struct bio *bio_chain_clone_range(struct bio **bio_src,
					unsigned int *offset,
					unsigned int len,
					gfp_t gfpmask)
{
	struct bio *bi = *bio_src;
	unsigned int off = *offset;
	struct bio *chain = NULL;
	struct bio **end;

	/* Build up a chain of clone bios up to the limit */

	if (!bi || off >= bi->bi_iter.bi_size || !len)
		return NULL;		/* Nothing to clone */

	end = &chain;
	while (len) {
		unsigned int bi_size;
		struct bio *bio;

		if (!bi) {
			rbd_warn(NULL, "bio_chain exhausted with %u left", len);
			goto out_err;	/* EINVAL; ran out of bio's */
		}
		bi_size = min_t(unsigned int, bi->bi_iter.bi_size - off, len);
		bio = bio_clone_range(bi, off, bi_size, gfpmask);
		if (!bio)
			goto out_err;	/* ENOMEM */

		*end = bio;
		end = &bio->bi_next;

		off += bi_size;
		if (off == bi->bi_iter.bi_size) {
			bi = bi->bi_next;
			off = 0;
		}
		len -= bi_size;
	}
	*bio_src = bi;
	*offset = off;

	return chain;
out_err:
	bio_chain_put(chain);

	return NULL;
}
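
/*
 * Usage sketch (illustrative only): cloning an image request's data
 * one object at a time, as the submit path does.  The in-out
 * parameters advance as each segment's worth of data is cloned:
 *
 *	struct bio *bio = original_chain;	// hypothetical chain
 *	unsigned int offset = 0;
 *	struct bio *clone;
 *
 *	clone = bio_chain_clone_range(&bio, &offset, seg_len, GFP_NOIO);
 *	// on return, bio/offset identify the first un-cloned byte,
 *	// ready to serve as the start of the next segment's clone
 */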
/*
 * The default/initial value for all object request flags is 0.  For
 * each flag, once its value is set to 1 it is never reset to 0
 * again.
 */
static void obj_request_img_data_set(struct rbd_obj_request *obj_request)
{
	if (test_and_set_bit(OBJ_REQ_IMG_DATA, &obj_request->flags)) {
		struct rbd_device *rbd_dev;

		rbd_dev = obj_request->img_request->rbd_dev;
		rbd_warn(rbd_dev, "obj_request %p already marked img_data",
			obj_request);
	}
}
static bool obj_request_img_data_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_IMG_DATA, &obj_request->flags) != 0;
}

static void obj_request_done_set(struct rbd_obj_request *obj_request)
{
	if (test_and_set_bit(OBJ_REQ_DONE, &obj_request->flags)) {
		struct rbd_device *rbd_dev = NULL;

		if (obj_request_img_data_test(obj_request))
			rbd_dev = obj_request->img_request->rbd_dev;
		rbd_warn(rbd_dev, "obj_request %p already marked done",
			obj_request);
	}
}

static bool obj_request_done_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_DONE, &obj_request->flags) != 0;
}
/*
 * This sets the KNOWN flag after (possibly) setting the EXISTS
 * flag.  The latter is set based on the "exists" value provided.
 *
 * Note that for our purposes once an object exists it never goes
 * away again.  It's possible that the response from two existence
 * checks are separated by the creation of the target object, and
 * the first ("doesn't exist") response arrives *after* the second
 * ("does exist").  In that case we ignore the second one.
 */
static void obj_request_existence_set(struct rbd_obj_request *obj_request,
				bool exists)
{
	if (exists)
		set_bit(OBJ_REQ_EXISTS, &obj_request->flags);
	set_bit(OBJ_REQ_KNOWN, &obj_request->flags);
	smp_mb();
}

static bool obj_request_known_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_KNOWN, &obj_request->flags) != 0;
}

static bool obj_request_exists_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_EXISTS, &obj_request->flags) != 0;
}
static bool obj_request_overlaps_parent(struct rbd_obj_request *obj_request)
{
	struct rbd_device *rbd_dev = obj_request->img_request->rbd_dev;

	return obj_request->img_offset <
	    round_up(rbd_dev->parent_overlap, rbd_obj_bytes(&rbd_dev->header));
}
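
/*
 * Worked example (illustrative): with 4 MiB objects and a parent
 * overlap of 5 MiB, round_up(5 MiB, 4 MiB) == 8 MiB.  An object
 * request at img_offset 4 MiB (covering image bytes 4..8 MiB)
 * therefore counts as overlapping even though only its first 1 MiB
 * lies within the overlap proper; copyup works on whole objects, so
 * a partially covered object must still consult the parent.
 */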
static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		atomic_read(&obj_request->kref.refcount));
	kref_get(&obj_request->kref);
}

static void rbd_obj_request_destroy(struct kref *kref);
static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request != NULL);
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		atomic_read(&obj_request->kref.refcount));
	kref_put(&obj_request->kref, rbd_obj_request_destroy);
}

static void rbd_img_request_get(struct rbd_img_request *img_request)
{
	dout("%s: img %p (was %d)\n", __func__, img_request,
	     atomic_read(&img_request->kref.refcount));
	kref_get(&img_request->kref);
}

static bool img_request_child_test(struct rbd_img_request *img_request);
static void rbd_parent_request_destroy(struct kref *kref);
static void rbd_img_request_destroy(struct kref *kref);
static void rbd_img_request_put(struct rbd_img_request *img_request)
{
	rbd_assert(img_request != NULL);
	dout("%s: img %p (was %d)\n", __func__, img_request,
		atomic_read(&img_request->kref.refcount));
	if (img_request_child_test(img_request))
		kref_put(&img_request->kref, rbd_parent_request_destroy);
	else
		kref_put(&img_request->kref, rbd_img_request_destroy);
}
static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->img_request == NULL);

	/* Image request now owns object's original reference */
	obj_request->img_request = img_request;
	obj_request->which = img_request->obj_request_count;
	rbd_assert(!obj_request_img_data_test(obj_request));
	obj_request_img_data_set(obj_request);
	rbd_assert(obj_request->which != BAD_WHICH);
	img_request->obj_request_count++;
	list_add_tail(&obj_request->links, &img_request->obj_requests);
	dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
		obj_request->which);
}

static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->which != BAD_WHICH);

	dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
		obj_request->which);
	list_del(&obj_request->links);
	rbd_assert(img_request->obj_request_count > 0);
	img_request->obj_request_count--;
	rbd_assert(obj_request->which == img_request->obj_request_count);
	obj_request->which = BAD_WHICH;
	rbd_assert(obj_request_img_data_test(obj_request));
	rbd_assert(obj_request->img_request == img_request);
	obj_request->img_request = NULL;
	obj_request->callback = NULL;
	rbd_obj_request_put(obj_request);
}
static bool obj_request_type_valid(enum obj_request_type type)
{
	switch (type) {
	case OBJ_REQUEST_NODATA:
	case OBJ_REQUEST_BIO:
	case OBJ_REQUEST_PAGES:
		return true;
	default:
		return false;
	}
}
static void rbd_img_obj_callback(struct rbd_obj_request *obj_request);

static void rbd_obj_request_submit(struct rbd_obj_request *obj_request)
{
	struct ceph_osd_request *osd_req = obj_request->osd_req;

	dout("%s %p osd_req %p\n", __func__, obj_request, osd_req);
	if (obj_request_img_data_test(obj_request)) {
		WARN_ON(obj_request->callback != rbd_img_obj_callback);
		rbd_img_request_get(obj_request->img_request);
	}
	ceph_osdc_start_request(osd_req->r_osdc, osd_req, false);
}

static void rbd_obj_request_end(struct rbd_obj_request *obj_request)
{
	dout("%s %p\n", __func__, obj_request);
	ceph_osdc_cancel_request(obj_request->osd_req);
}
/*
 * Wait for an object request to complete.  If interrupted, cancel the
 * underlying osd request.
 *
 * @timeout: in jiffies, 0 means "wait forever"
 */
static int __rbd_obj_request_wait(struct rbd_obj_request *obj_request,
				  unsigned long timeout)
{
	long ret;

	dout("%s %p\n", __func__, obj_request);
	ret = wait_for_completion_interruptible_timeout(
					&obj_request->completion,
					ceph_timeout_jiffies(timeout));
	if (ret <= 0) {
		if (ret == 0)
			ret = -ETIMEDOUT;
		rbd_obj_request_end(obj_request);
	} else {
		ret = 0;
	}

	dout("%s %p ret %d\n", __func__, obj_request, (int)ret);
	return ret;
}

static int rbd_obj_request_wait(struct rbd_obj_request *obj_request)
{
	return __rbd_obj_request_wait(obj_request, 0);
}
static void rbd_img_request_complete(struct rbd_img_request *img_request)
{
	dout("%s: img %p\n", __func__, img_request);

	/*
	 * If no error occurred, compute the aggregate transfer
	 * count for the image request.  We could instead use
	 * atomic64_cmpxchg() to update it as each object request
	 * completes; not clear which way is better off hand.
	 */
	if (!img_request->result) {
		struct rbd_obj_request *obj_request;
		u64 xferred = 0;

		for_each_obj_request(img_request, obj_request)
			xferred += obj_request->xferred;
		img_request->xferred = xferred;
	}

	if (img_request->callback)
		img_request->callback(img_request);
	else
		rbd_img_request_put(img_request);
}
/*
 * The default/initial value for all image request flags is 0.  Each
 * is conditionally set to 1 at image request initialization time
 * and currently never changes thereafter.
 */
static void img_request_write_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_WRITE, &img_request->flags);
	smp_mb();
}

static bool img_request_write_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_WRITE, &img_request->flags) != 0;
}

/*
 * Set the discard flag when the img_request is a discard request
 */
static void img_request_discard_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_DISCARD, &img_request->flags);
	smp_mb();
}

static bool img_request_discard_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_DISCARD, &img_request->flags) != 0;
}

static void img_request_child_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_CHILD, &img_request->flags);
	smp_mb();
}

static void img_request_child_clear(struct rbd_img_request *img_request)
{
	clear_bit(IMG_REQ_CHILD, &img_request->flags);
	smp_mb();
}

static bool img_request_child_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_CHILD, &img_request->flags) != 0;
}

static void img_request_layered_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

static void img_request_layered_clear(struct rbd_img_request *img_request)
{
	clear_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

static bool img_request_layered_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
}

static enum obj_operation_type
rbd_img_request_op_type(struct rbd_img_request *img_request)
{
	if (img_request_write_test(img_request))
		return OBJ_OP_WRITE;
	else if (img_request_discard_test(img_request))
		return OBJ_OP_DISCARD;
	else
		return OBJ_OP_READ;
}
static void
rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
{
	u64 xferred = obj_request->xferred;
	u64 length = obj_request->length;

	dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
		obj_request, obj_request->img_request, obj_request->result,
		xferred, length);
	/*
	 * ENOENT means a hole in the image.  We zero-fill the entire
	 * length of the request.  A short read also implies zero-fill
	 * to the end of the request.  An error requires the whole
	 * length of the request to be reported finished with an error
	 * to the block layer.  In each case we update the xferred
	 * count to indicate the whole request was satisfied.
	 */
	rbd_assert(obj_request->type != OBJ_REQUEST_NODATA);
	if (obj_request->result == -ENOENT) {
		if (obj_request->type == OBJ_REQUEST_BIO)
			zero_bio_chain(obj_request->bio_list, 0);
		else
			zero_pages(obj_request->pages, 0, length);
		obj_request->result = 0;
	} else if (xferred < length && !obj_request->result) {
		if (obj_request->type == OBJ_REQUEST_BIO)
			zero_bio_chain(obj_request->bio_list, xferred);
		else
			zero_pages(obj_request->pages, xferred, length);
	}
	obj_request->xferred = length;
	obj_request_done_set(obj_request);
}
static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p cb %p\n", __func__, obj_request,
		obj_request->callback);
	if (obj_request->callback)
		obj_request->callback(obj_request);
	else
		complete_all(&obj_request->completion);
}
static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = NULL;
	struct rbd_device *rbd_dev = NULL;
	bool layered = false;

	if (obj_request_img_data_test(obj_request)) {
		img_request = obj_request->img_request;
		layered = img_request && img_request_layered_test(img_request);
		rbd_dev = img_request->rbd_dev;
	}

	dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
		obj_request, img_request, obj_request->result,
		obj_request->xferred, obj_request->length);
	if (layered && obj_request->result == -ENOENT &&
			obj_request->img_offset < rbd_dev->parent_overlap)
		rbd_img_parent_read(obj_request);
	else if (img_request)
		rbd_img_obj_request_read_callback(obj_request);
	else
		obj_request_done_set(obj_request);
}
static void rbd_osd_write_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p result %d %llu\n", __func__, obj_request,
		obj_request->result, obj_request->length);
	/*
	 * There is no such thing as a successful short write.  Set
	 * it to our originally-requested length.
	 */
	obj_request->xferred = obj_request->length;
	obj_request_done_set(obj_request);
}

static void rbd_osd_discard_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p result %d %llu\n", __func__, obj_request,
		obj_request->result, obj_request->length);
	/*
	 * There is no such thing as a successful short discard.  Set
	 * it to our originally-requested length.
	 */
	obj_request->xferred = obj_request->length;
	/* discarding a non-existent object is not a problem */
	if (obj_request->result == -ENOENT)
		obj_request->result = 0;
	obj_request_done_set(obj_request);
}
/*
 * For a simple stat call there's nothing to do.  We'll do more if
 * this is part of a write sequence for a layered image.
 */
static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);
	obj_request_done_set(obj_request);
}

static void rbd_osd_call_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);

	if (obj_request_img_data_test(obj_request))
		rbd_osd_copyup_callback(obj_request);
	else
		obj_request_done_set(obj_request);
}
static void rbd_osd_req_callback(struct ceph_osd_request *osd_req)
{
	struct rbd_obj_request *obj_request = osd_req->r_priv;
	u16 opcode;

	dout("%s: osd_req %p\n", __func__, osd_req);
	rbd_assert(osd_req == obj_request->osd_req);
	if (obj_request_img_data_test(obj_request)) {
		rbd_assert(obj_request->img_request);
		rbd_assert(obj_request->which != BAD_WHICH);
	} else {
		rbd_assert(obj_request->which == BAD_WHICH);
	}

	if (osd_req->r_result < 0)
		obj_request->result = osd_req->r_result;

	/*
	 * We support a 64-bit length, but ultimately it has to be
	 * passed to the block layer, which just supports a 32-bit
	 * length field.
	 */
	obj_request->xferred = osd_req->r_ops[0].outdata_len;
	rbd_assert(obj_request->xferred < (u64)UINT_MAX);

	opcode = osd_req->r_ops[0].op;
	switch (opcode) {
	case CEPH_OSD_OP_READ:
		rbd_osd_read_callback(obj_request);
		break;
	case CEPH_OSD_OP_SETALLOCHINT:
		rbd_assert(osd_req->r_ops[1].op == CEPH_OSD_OP_WRITE ||
			   osd_req->r_ops[1].op == CEPH_OSD_OP_WRITEFULL);
		/* fall through */
	case CEPH_OSD_OP_WRITE:
	case CEPH_OSD_OP_WRITEFULL:
		rbd_osd_write_callback(obj_request);
		break;
	case CEPH_OSD_OP_STAT:
		rbd_osd_stat_callback(obj_request);
		break;
	case CEPH_OSD_OP_DELETE:
	case CEPH_OSD_OP_TRUNCATE:
	case CEPH_OSD_OP_ZERO:
		rbd_osd_discard_callback(obj_request);
		break;
	case CEPH_OSD_OP_CALL:
		rbd_osd_call_callback(obj_request);
		break;
	default:
		rbd_warn(NULL, "%s: unsupported op %hu",
			obj_request->object_name, (unsigned short) opcode);
		break;
	}

	if (obj_request_done_test(obj_request))
		rbd_obj_request_complete(obj_request);
}
static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = obj_request->img_request;
	struct ceph_osd_request *osd_req = obj_request->osd_req;

	if (img_request)
		osd_req->r_snapid = img_request->snap_id;
}

static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
{
	struct ceph_osd_request *osd_req = obj_request->osd_req;

	osd_req->r_mtime = CURRENT_TIME;
	osd_req->r_data_offset = obj_request->offset;
}
/*
 * Create an osd request.  A read request has one osd op (read).
 * A write request has either one (watch) or two (hint+write) osd ops.
 * (All rbd data writes are prefixed with an allocation hint op, but
 * technically osd watch is a write request, hence this distinction.)
 */
static struct ceph_osd_request *rbd_osd_req_create(
					struct rbd_device *rbd_dev,
					enum obj_operation_type op_type,
					unsigned int num_ops,
					struct rbd_obj_request *obj_request)
{
	struct ceph_snap_context *snapc = NULL;
	struct ceph_osd_client *osdc;
	struct ceph_osd_request *osd_req;

	if (obj_request_img_data_test(obj_request) &&
		(op_type == OBJ_OP_DISCARD || op_type == OBJ_OP_WRITE)) {
		struct rbd_img_request *img_request = obj_request->img_request;
		if (op_type == OBJ_OP_WRITE) {
			rbd_assert(img_request_write_test(img_request));
		} else {
			rbd_assert(img_request_discard_test(img_request));
		}
		snapc = img_request->snapc;
	}

	rbd_assert(num_ops == 1 || ((op_type == OBJ_OP_WRITE) && num_ops == 2));

	/* Allocate and initialize the request, for the num_ops ops */

	osdc = &rbd_dev->rbd_client->client->osdc;
	osd_req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false,
					  GFP_NOIO);
	if (!osd_req)
		goto fail;

	if (op_type == OBJ_OP_WRITE || op_type == OBJ_OP_DISCARD)
		osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
	else
		osd_req->r_flags = CEPH_OSD_FLAG_READ;

	osd_req->r_callback = rbd_osd_req_callback;
	osd_req->r_priv = obj_request;

	osd_req->r_base_oloc.pool = rbd_dev->layout.pool_id;
	if (ceph_oid_aprintf(&osd_req->r_base_oid, GFP_NOIO, "%s",
			     obj_request->object_name))
		goto fail;

	if (ceph_osdc_alloc_messages(osd_req, GFP_NOIO))
		goto fail;

	return osd_req;

fail:
	ceph_osdc_put_request(osd_req);
	return NULL;
}
/*
 * Create a copyup osd request based on the information in the object
 * request supplied.  A copyup request has two or three osd ops, a
 * copyup method call, potentially a hint op, and a write or truncate
 * or zero op.
 */
static struct ceph_osd_request *
rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	struct ceph_snap_context *snapc;
	struct rbd_device *rbd_dev;
	struct ceph_osd_client *osdc;
	struct ceph_osd_request *osd_req;
	int num_osd_ops = 3;

	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;
	rbd_assert(img_request);
	rbd_assert(img_request_write_test(img_request) ||
			img_request_discard_test(img_request));

	if (img_request_discard_test(img_request))
		num_osd_ops = 2;

	/* Allocate and initialize the request, for all the ops */

	snapc = img_request->snapc;
	rbd_dev = img_request->rbd_dev;
	osdc = &rbd_dev->rbd_client->client->osdc;
	osd_req = ceph_osdc_alloc_request(osdc, snapc, num_osd_ops,
						false, GFP_NOIO);
	if (!osd_req)
		goto fail;

	osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
	osd_req->r_callback = rbd_osd_req_callback;
	osd_req->r_priv = obj_request;

	osd_req->r_base_oloc.pool = rbd_dev->layout.pool_id;
	if (ceph_oid_aprintf(&osd_req->r_base_oid, GFP_NOIO, "%s",
			     obj_request->object_name))
		goto fail;

	if (ceph_osdc_alloc_messages(osd_req, GFP_NOIO))
		goto fail;

	return osd_req;

fail:
	ceph_osdc_put_request(osd_req);
	return NULL;
}
static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
{
	ceph_osdc_put_request(osd_req);
}
/* object_name is assumed to be a non-null pointer and NUL-terminated */

static struct rbd_obj_request *rbd_obj_request_create(const char *object_name,
						u64 offset, u64 length,
						enum obj_request_type type)
{
	struct rbd_obj_request *obj_request;
	size_t size;
	char *name;

	rbd_assert(obj_request_type_valid(type));

	size = strlen(object_name) + 1;
	name = kmalloc(size, GFP_NOIO);
	if (!name)
		return NULL;

	obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_NOIO);
	if (!obj_request) {
		kfree(name);
		return NULL;
	}

	obj_request->object_name = memcpy(name, object_name, size);
	obj_request->offset = offset;
	obj_request->length = length;
	obj_request->flags = 0;
	obj_request->which = BAD_WHICH;
	obj_request->type = type;
	INIT_LIST_HEAD(&obj_request->links);
	init_completion(&obj_request->completion);
	kref_init(&obj_request->kref);

	dout("%s: \"%s\" %llu/%llu %d -> obj %p\n", __func__, object_name,
		offset, length, (int)type, obj_request);

	return obj_request;
}
static void rbd_obj_request_destroy(struct kref *kref)
{
	struct rbd_obj_request *obj_request;

	obj_request = container_of(kref, struct rbd_obj_request, kref);

	dout("%s: obj %p\n", __func__, obj_request);

	rbd_assert(obj_request->img_request == NULL);
	rbd_assert(obj_request->which == BAD_WHICH);

	if (obj_request->osd_req)
		rbd_osd_req_destroy(obj_request->osd_req);

	rbd_assert(obj_request_type_valid(obj_request->type));
	switch (obj_request->type) {
	case OBJ_REQUEST_NODATA:
		break;		/* Nothing to do */
	case OBJ_REQUEST_BIO:
		if (obj_request->bio_list)
			bio_chain_put(obj_request->bio_list);
		break;
	case OBJ_REQUEST_PAGES:
		if (obj_request->pages)
			ceph_release_page_vector(obj_request->pages,
						obj_request->page_count);
		break;
	}

	kfree(obj_request->object_name);
	obj_request->object_name = NULL;
	kmem_cache_free(rbd_obj_request_cache, obj_request);
}
/* It's OK to call this for a device with no parent */

static void rbd_spec_put(struct rbd_spec *spec);
static void rbd_dev_unparent(struct rbd_device *rbd_dev)
{
	rbd_dev_remove_parent(rbd_dev);
	rbd_spec_put(rbd_dev->parent_spec);
	rbd_dev->parent_spec = NULL;
	rbd_dev->parent_overlap = 0;
}
/*
 * Parent image reference counting is used to determine when an
 * image's parent fields can be safely torn down--after there are no
 * more in-flight requests to the parent image.  When the last
 * reference is dropped, cleaning them up is safe.
 */
static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
{
	int counter;

	if (!rbd_dev->parent_spec)
		return;

	counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
	if (counter > 0)
		return;

	/* Last reference; clean up parent data structures */

	if (!counter)
		rbd_dev_unparent(rbd_dev);
	else
		rbd_warn(rbd_dev, "parent reference underflow");
}
/*
 * If an image has a non-zero parent overlap, get a reference to its
 * parent.
 *
 * Returns true if the rbd device has a parent with a non-zero
 * overlap and a reference for it was successfully taken, or
 * false otherwise.
 */
static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
{
	int counter = 0;

	if (!rbd_dev->parent_spec)
		return false;

	down_read(&rbd_dev->header_rwsem);
	if (rbd_dev->parent_overlap)
		counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
	up_read(&rbd_dev->header_rwsem);

	if (counter < 0)
		rbd_warn(rbd_dev, "parent reference overflow");

	return counter > 0;
}
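
/*
 * Usage sketch (illustrative only): callers pair these as get/put
 * around work that may touch the parent image, the way
 * rbd_img_request_create() and rbd_img_request_destroy() do below:
 *
 *	if (rbd_dev_parent_get(rbd_dev))
 *		img_request_layered_set(img_request);
 *	...
 *	if (img_request_layered_test(img_request)) {
 *		img_request_layered_clear(img_request);
 *		rbd_dev_parent_put(img_request->rbd_dev);
 *	}
 */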
/*
 * Caller is responsible for filling in the list of object requests
 * that comprises the image request, and the Linux request pointer
 * (if there is one).
 */
static struct rbd_img_request *rbd_img_request_create(
					struct rbd_device *rbd_dev,
					u64 offset, u64 length,
					enum obj_operation_type op_type,
					struct ceph_snap_context *snapc)
{
	struct rbd_img_request *img_request;

	img_request = kmem_cache_alloc(rbd_img_request_cache, GFP_NOIO);
	if (!img_request)
		return NULL;

	img_request->rq = NULL;
	img_request->rbd_dev = rbd_dev;
	img_request->offset = offset;
	img_request->length = length;
	img_request->flags = 0;
	if (op_type == OBJ_OP_DISCARD) {
		img_request_discard_set(img_request);
		img_request->snapc = snapc;
	} else if (op_type == OBJ_OP_WRITE) {
		img_request_write_set(img_request);
		img_request->snapc = snapc;
	} else {
		img_request->snap_id = rbd_dev->spec->snap_id;
	}
	if (rbd_dev_parent_get(rbd_dev))
		img_request_layered_set(img_request);
	spin_lock_init(&img_request->completion_lock);
	img_request->next_completion = 0;
	img_request->callback = NULL;
	img_request->result = 0;
	img_request->obj_request_count = 0;
	INIT_LIST_HEAD(&img_request->obj_requests);
	kref_init(&img_request->kref);

	dout("%s: rbd_dev %p %s %llu/%llu -> img %p\n", __func__, rbd_dev,
		obj_op_name(op_type), offset, length, img_request);

	return img_request;
}
static void rbd_img_request_destroy(struct kref *kref)
{
	struct rbd_img_request *img_request;
	struct rbd_obj_request *obj_request;
	struct rbd_obj_request *next_obj_request;

	img_request = container_of(kref, struct rbd_img_request, kref);

	dout("%s: img %p\n", __func__, img_request);

	for_each_obj_request_safe(img_request, obj_request, next_obj_request)
		rbd_img_obj_request_del(img_request, obj_request);
	rbd_assert(img_request->obj_request_count == 0);

	if (img_request_layered_test(img_request)) {
		img_request_layered_clear(img_request);
		rbd_dev_parent_put(img_request->rbd_dev);
	}

	if (img_request_write_test(img_request) ||
		img_request_discard_test(img_request))
		ceph_put_snap_context(img_request->snapc);

	kmem_cache_free(rbd_img_request_cache, img_request);
}
2297 static struct rbd_img_request *rbd_parent_request_create(
2298 struct rbd_obj_request *obj_request,
2299 u64 img_offset, u64 length)
2301 struct rbd_img_request *parent_request;
2302 struct rbd_device *rbd_dev;
2304 rbd_assert(obj_request->img_request);
2305 rbd_dev = obj_request->img_request->rbd_dev;
2307 parent_request = rbd_img_request_create(rbd_dev->parent, img_offset,
2308 length, OBJ_OP_READ, NULL);
2309 if (!parent_request)
2310 return NULL;
2312 img_request_child_set(parent_request);
2313 rbd_obj_request_get(obj_request);
2314 parent_request->obj_request = obj_request;
2316 return parent_request;
2319 static void rbd_parent_request_destroy(struct kref *kref)
2321 struct rbd_img_request *parent_request;
2322 struct rbd_obj_request *orig_request;
2324 parent_request = container_of(kref, struct rbd_img_request, kref);
2325 orig_request = parent_request->obj_request;
2327 parent_request->obj_request = NULL;
2328 rbd_obj_request_put(orig_request);
2329 img_request_child_clear(parent_request);
2331 rbd_img_request_destroy(kref);
2334 static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
2336 struct rbd_img_request *img_request;
2337 unsigned int xferred;
2341 rbd_assert(obj_request_img_data_test(obj_request));
2342 img_request = obj_request->img_request;
2344 rbd_assert(obj_request->xferred <= (u64)UINT_MAX);
2345 xferred = (unsigned int)obj_request->xferred;
2346 result = obj_request->result;
2347 if (result) {
2348 struct rbd_device *rbd_dev = img_request->rbd_dev;
2349 enum obj_operation_type op_type;
2351 if (img_request_discard_test(img_request))
2352 op_type = OBJ_OP_DISCARD;
2353 else if (img_request_write_test(img_request))
2354 op_type = OBJ_OP_WRITE;
2355 else
2356 op_type = OBJ_OP_READ;
2358 rbd_warn(rbd_dev, "%s %llx at %llx (%llx)",
2359 obj_op_name(op_type), obj_request->length,
2360 obj_request->img_offset, obj_request->offset);
2361 rbd_warn(rbd_dev, " result %d xferred %x",
2362 result, xferred);
2363 if (!img_request->result)
2364 img_request->result = result;
2366 * Need to end I/O on the entire obj_request worth of
2367 * bytes in case of error.
2369 xferred = obj_request->length;
2372 /* Image object requests don't own their page array */
2374 if (obj_request->type == OBJ_REQUEST_PAGES) {
2375 obj_request->pages = NULL;
2376 obj_request->page_count = 0;
2379 if (img_request_child_test(img_request)) {
2380 rbd_assert(img_request->obj_request != NULL);
2381 more = obj_request->which < img_request->obj_request_count - 1;
2383 rbd_assert(img_request->rq != NULL);
2385 more = blk_update_request(img_request->rq, result, xferred);
2386 if (!more)
2387 __blk_mq_end_request(img_request->rq, result);
2393 static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
2395 struct rbd_img_request *img_request;
2396 u32 which = obj_request->which;
2399 rbd_assert(obj_request_img_data_test(obj_request));
2400 img_request = obj_request->img_request;
2402 dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
2403 rbd_assert(img_request != NULL);
2404 rbd_assert(img_request->obj_request_count > 0);
2405 rbd_assert(which != BAD_WHICH);
2406 rbd_assert(which < img_request->obj_request_count);
2408 spin_lock_irq(&img_request->completion_lock);
2409 if (which != img_request->next_completion)
2410 goto out;
2412 for_each_obj_request_from(img_request, obj_request) {
2414 rbd_assert(which < img_request->obj_request_count);
2416 if (!obj_request_done_test(obj_request))
2417 break;
2418 more = rbd_img_obj_end_request(obj_request);
2419 which++;
2422 rbd_assert(more ^ (which == img_request->obj_request_count));
2423 img_request->next_completion = which;
2424 out:
2425 spin_unlock_irq(&img_request->completion_lock);
2426 rbd_img_request_put(img_request);
2428 if (!more)
2429 rbd_img_request_complete(img_request);
2433 * Add individual osd ops to the given ceph_osd_request and prepare
2434 * them for submission. num_ops is the number of osd operations
2435 * already added to the osd request.
2437 static void rbd_img_obj_request_fill(struct rbd_obj_request *obj_request,
2438 struct ceph_osd_request *osd_request,
2439 enum obj_operation_type op_type,
2440 unsigned int num_ops)
2442 struct rbd_img_request *img_request = obj_request->img_request;
2443 struct rbd_device *rbd_dev = img_request->rbd_dev;
2444 u64 object_size = rbd_obj_bytes(&rbd_dev->header);
2445 u64 offset = obj_request->offset;
2446 u64 length = obj_request->length;
2450 if (op_type == OBJ_OP_DISCARD) {
2451 if (!offset && length == object_size &&
2452 (!img_request_layered_test(img_request) ||
2453 !obj_request_overlaps_parent(obj_request))) {
2454 opcode = CEPH_OSD_OP_DELETE;
2455 } else if ((offset + length == object_size)) {
2456 opcode = CEPH_OSD_OP_TRUNCATE;
2458 down_read(&rbd_dev->header_rwsem);
2459 img_end = rbd_dev->header.image_size;
2460 up_read(&rbd_dev->header_rwsem);
2462 if (obj_request->img_offset + length == img_end)
2463 opcode = CEPH_OSD_OP_TRUNCATE;
2464 else
2465 opcode = CEPH_OSD_OP_ZERO;
2467 } else if (op_type == OBJ_OP_WRITE) {
2468 if (!offset && length == object_size)
2469 opcode = CEPH_OSD_OP_WRITEFULL;
2470 else
2471 opcode = CEPH_OSD_OP_WRITE;
2472 osd_req_op_alloc_hint_init(osd_request, num_ops,
2473 object_size, object_size);
2474 num_ops++;
2475 } else {
2476 opcode = CEPH_OSD_OP_READ;
2479 if (opcode == CEPH_OSD_OP_DELETE)
2480 osd_req_op_init(osd_request, num_ops, opcode, 0);
2481 else
2482 osd_req_op_extent_init(osd_request, num_ops, opcode,
2483 offset, length, 0, 0);
2485 if (obj_request->type == OBJ_REQUEST_BIO)
2486 osd_req_op_extent_osd_data_bio(osd_request, num_ops,
2487 obj_request->bio_list, length);
2488 else if (obj_request->type == OBJ_REQUEST_PAGES)
2489 osd_req_op_extent_osd_data_pages(osd_request, num_ops,
2490 obj_request->pages, length,
2491 offset & ~PAGE_MASK, false, false);
2493 /* Discards are also writes */
2494 if (op_type == OBJ_OP_WRITE || op_type == OBJ_OP_DISCARD)
2495 rbd_osd_req_format_write(obj_request);
2496 else
2497 rbd_osd_req_format_read(obj_request);
2501 * Split up an image request into one or more object requests, each
2502 * to a different object. The "type" parameter indicates whether
2503 * "data_desc" is the pointer to the head of a list of bio
2504 * structures, or the base of a page array. In either case this
2505 * function assumes data_desc describes memory sufficient to hold
2506 * all data described by the image request.
2508 static int rbd_img_request_fill(struct rbd_img_request *img_request,
2509 enum obj_request_type type,
2510 void *data_desc)
2512 struct rbd_device *rbd_dev = img_request->rbd_dev;
2513 struct rbd_obj_request *obj_request = NULL;
2514 struct rbd_obj_request *next_obj_request;
2515 struct bio *bio_list = NULL;
2516 unsigned int bio_offset = 0;
2517 struct page **pages = NULL;
2518 enum obj_operation_type op_type;
2522 dout("%s: img %p type %d data_desc %p\n", __func__, img_request,
2523 (int)type, data_desc);
2525 img_offset = img_request->offset;
2526 resid = img_request->length;
2527 rbd_assert(resid > 0);
2528 op_type = rbd_img_request_op_type(img_request);
2530 if (type == OBJ_REQUEST_BIO) {
2531 bio_list = data_desc;
2532 rbd_assert(img_offset ==
2533 bio_list->bi_iter.bi_sector << SECTOR_SHIFT);
2534 } else if (type == OBJ_REQUEST_PAGES) {
2535 pages = data_desc;
2536 }
2538 while (resid) {
2539 struct ceph_osd_request *osd_req;
2540 const char *object_name;
2544 object_name = rbd_segment_name(rbd_dev, img_offset);
2545 if (!object_name)
2546 goto out_unwind;
2547 offset = rbd_segment_offset(rbd_dev, img_offset);
2548 length = rbd_segment_length(rbd_dev, img_offset, resid);
2549 obj_request = rbd_obj_request_create(object_name,
2550 offset, length, type);
2551 /* object request has its own copy of the object name */
2552 rbd_segment_name_free(object_name);
2553 if (!obj_request)
2554 goto out_unwind;
2557 * set obj_request->img_request before creating the
2558 * osd_request so that it gets the right snapc
2560 rbd_img_obj_request_add(img_request, obj_request);
2562 if (type == OBJ_REQUEST_BIO) {
2563 unsigned int clone_size;
2565 rbd_assert(length <= (u64)UINT_MAX);
2566 clone_size = (unsigned int)length;
2567 obj_request->bio_list =
2568 bio_chain_clone_range(&bio_list,
2569 &bio_offset,
2570 clone_size,
2571 GFP_NOIO);
2572 if (!obj_request->bio_list)
2573 goto out_unwind;
2574 } else if (type == OBJ_REQUEST_PAGES) {
2575 unsigned int page_count;
2577 obj_request->pages = pages;
2578 page_count = (u32)calc_pages_for(offset, length);
2579 obj_request->page_count = page_count;
2580 if ((offset + length) & ~PAGE_MASK)
2581 page_count--; /* more on last page */
2582 pages += page_count;
2585 osd_req = rbd_osd_req_create(rbd_dev, op_type,
2586 (op_type == OBJ_OP_WRITE) ? 2 : 1,
2587 obj_request);
2588 if (!osd_req)
2589 goto out_unwind;
2591 obj_request->osd_req = osd_req;
2592 obj_request->callback = rbd_img_obj_callback;
2593 obj_request->img_offset = img_offset;
2595 rbd_img_obj_request_fill(obj_request, osd_req, op_type, 0);
2597 img_offset += length;
2604 for_each_obj_request_safe(img_request, obj_request, next_obj_request)
2605 rbd_img_obj_request_del(img_request, obj_request);
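/*
 * A worked example of the split above, assuming the default 4 MiB
 * (obj_order 22) objects: an 8 MiB write at image offset 6 MiB is
 * broken into three object requests --
 *
 *	object 1: offset 2 MiB, length 2 MiB
 *	object 2: offset 0,     length 4 MiB
 *	object 3: offset 0,     length 2 MiB
 *
 * with rbd_segment_name()/_offset()/_length() computing each piece
 * from img_offset and the remaining resid.
 */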
2610 static void
2611 rbd_osd_copyup_callback(struct rbd_obj_request *obj_request)
2613 struct rbd_img_request *img_request;
2614 struct rbd_device *rbd_dev;
2615 struct page **pages;
2618 dout("%s: obj %p\n", __func__, obj_request);
2620 rbd_assert(obj_request->type == OBJ_REQUEST_BIO ||
2621 obj_request->type == OBJ_REQUEST_NODATA);
2622 rbd_assert(obj_request_img_data_test(obj_request));
2623 img_request = obj_request->img_request;
2624 rbd_assert(img_request);
2626 rbd_dev = img_request->rbd_dev;
2627 rbd_assert(rbd_dev);
2629 pages = obj_request->copyup_pages;
2630 rbd_assert(pages != NULL);
2631 obj_request->copyup_pages = NULL;
2632 page_count = obj_request->copyup_page_count;
2633 rbd_assert(page_count);
2634 obj_request->copyup_page_count = 0;
2635 ceph_release_page_vector(pages, page_count);
2638 * We want the transfer count to reflect the size of the
2639 * original write request. There is no such thing as a
2640 * successful short write, so if the request was successful
2641 * we can just set it to the originally-requested length.
2643 if (!obj_request->result)
2644 obj_request->xferred = obj_request->length;
2646 obj_request_done_set(obj_request);
2649 static void
2650 rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
2652 struct rbd_obj_request *orig_request;
2653 struct ceph_osd_request *osd_req;
2654 struct rbd_device *rbd_dev;
2655 struct page **pages;
2656 enum obj_operation_type op_type;
2661 rbd_assert(img_request_child_test(img_request));
2663 /* First get what we need from the image request */
2665 pages = img_request->copyup_pages;
2666 rbd_assert(pages != NULL);
2667 img_request->copyup_pages = NULL;
2668 page_count = img_request->copyup_page_count;
2669 rbd_assert(page_count);
2670 img_request->copyup_page_count = 0;
2672 orig_request = img_request->obj_request;
2673 rbd_assert(orig_request != NULL);
2674 rbd_assert(obj_request_type_valid(orig_request->type));
2675 img_result = img_request->result;
2676 parent_length = img_request->length;
2677 rbd_assert(img_result || parent_length == img_request->xferred);
2678 rbd_img_request_put(img_request);
2680 rbd_assert(orig_request->img_request);
2681 rbd_dev = orig_request->img_request->rbd_dev;
2682 rbd_assert(rbd_dev);
2685 * If the overlap has become 0 (most likely because the
2686 * image has been flattened) we need to free the pages
2687 * and re-submit the original write request.
2689 if (!rbd_dev->parent_overlap) {
2690 ceph_release_page_vector(pages, page_count);
2691 rbd_obj_request_submit(orig_request);
2692 return;
2699 * The original osd request is of no use to us any more.
2700 * We need a new one that can hold the three ops in a copyup
2701 * request. Allocate the new copyup osd request for the
2702 * original request, and release the old one.
2704 img_result = -ENOMEM;
2705 osd_req = rbd_osd_req_create_copyup(orig_request);
2706 if (!osd_req)
2707 goto out_err;
2708 rbd_osd_req_destroy(orig_request->osd_req);
2709 orig_request->osd_req = osd_req;
2710 orig_request->copyup_pages = pages;
2711 orig_request->copyup_page_count = page_count;
2713 /* Initialize the copyup op */
2715 osd_req_op_cls_init(osd_req, 0, CEPH_OSD_OP_CALL, "rbd", "copyup");
2716 osd_req_op_cls_request_data_pages(osd_req, 0, pages, parent_length, 0,
2717 false, false);
2719 /* Add the other op(s) */
2721 op_type = rbd_img_request_op_type(orig_request->img_request);
2722 rbd_img_obj_request_fill(orig_request, osd_req, op_type, 1);
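/*
 * A sketch of the op vector this builds for a layered write (an
 * illustration based on rbd_img_obj_request_fill() above, not a
 * separate code path):
 *
 *	op 0: CEPH_OSD_OP_CALL ("rbd", "copyup"), parent data attached
 *	op 1: CEPH_OSD_OP_SETALLOCHINT
 *	op 2: CEPH_OSD_OP_WRITE or CEPH_OSD_OP_WRITEFULL
 *
 * For a discard there is no alloc hint and op 1 is the delete,
 * truncate or zero op chosen in rbd_img_obj_request_fill().
 */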
2724 /* All set, send it off. */
2726 rbd_obj_request_submit(orig_request);
2730 ceph_release_page_vector(pages, page_count);
2731 orig_request->result = img_result;
2732 orig_request->xferred = 0;
2733 rbd_img_request_get(orig_request->img_request);
2734 obj_request_done_set(orig_request);
2735 rbd_obj_request_complete(orig_request);
2739 * Read from the parent image the range of data that covers the
2740 * entire target of the given object request. This is used for
2741 * satisfying a layered image write request when the target of an
2742 * object request from the image request does not exist.
2744 * A page array big enough to hold the returned data is allocated
2745 * and supplied to rbd_img_request_fill() as the "data descriptor."
2746 * When the read completes, this page array will be transferred to
2747 * the original object request for the copyup operation.
2749 * If an error occurs, it is recorded as the result of the original
2750 * object request in rbd_img_obj_exists_callback().
2752 static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
2754 struct rbd_device *rbd_dev = obj_request->img_request->rbd_dev;
2755 struct rbd_img_request *parent_request = NULL;
2758 struct page **pages = NULL;
2762 rbd_assert(rbd_dev->parent != NULL);
2765 * Determine the byte range covered by the object in the
2766 * child image to which the original request was to be sent.
2768 img_offset = obj_request->img_offset - obj_request->offset;
2769 length = (u64)1 << rbd_dev->header.obj_order;
2772 * There is no defined parent data beyond the parent
2773 * overlap, so limit what we read at that boundary if
2774 * necessary.
2776 if (img_offset + length > rbd_dev->parent_overlap) {
2777 rbd_assert(img_offset < rbd_dev->parent_overlap);
2778 length = rbd_dev->parent_overlap - img_offset;
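/*
 * For example, assuming 4 MiB objects: an original request at image
 * offset 5 MiB sits 1 MiB into its object, so the covering range is
 * img_offset = 5 MiB - 1 MiB = 4 MiB with length = 4 MiB.  With a
 * parent overlap of 6 MiB the read is then clamped to 2 MiB, since
 * there is no parent data past the overlap.
 */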
2782 * Allocate a page array big enough to receive the data read
2783 * from the parent.
2785 page_count = (u32)calc_pages_for(0, length);
2786 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2787 if (IS_ERR(pages)) {
2788 result = PTR_ERR(pages);
2794 parent_request = rbd_parent_request_create(obj_request,
2795 img_offset, length);
2796 if (!parent_request)
2797 goto out_err;
2799 result = rbd_img_request_fill(parent_request, OBJ_REQUEST_PAGES, pages);
2803 parent_request->copyup_pages = pages;
2804 parent_request->copyup_page_count = page_count;
2805 parent_request->callback = rbd_img_obj_parent_read_full_callback;
2807 result = rbd_img_request_submit(parent_request);
2811 parent_request->copyup_pages = NULL;
2812 parent_request->copyup_page_count = 0;
2813 parent_request->obj_request = NULL;
2814 rbd_obj_request_put(obj_request);
2817 ceph_release_page_vector(pages, page_count);
2819 rbd_img_request_put(parent_request);
2823 static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request)
2825 struct rbd_obj_request *orig_request;
2826 struct rbd_device *rbd_dev;
2829 rbd_assert(!obj_request_img_data_test(obj_request));
2832 * All we need from the object request is the original
2833 * request and the result of the STAT op. Grab those, then
2834 * we're done with the request.
2836 orig_request = obj_request->obj_request;
2837 obj_request->obj_request = NULL;
2838 rbd_obj_request_put(orig_request);
2839 rbd_assert(orig_request);
2840 rbd_assert(orig_request->img_request);
2842 result = obj_request->result;
2843 obj_request->result = 0;
2845 dout("%s: obj %p for obj %p result %d %llu/%llu\n", __func__,
2846 obj_request, orig_request, result,
2847 obj_request->xferred, obj_request->length);
2848 rbd_obj_request_put(obj_request);
2851 * If the overlap has become 0 (most likely because the
2852 * image has been flattened) we need to re-submit the
2853 * original request.
2855 rbd_dev = orig_request->img_request->rbd_dev;
2856 if (!rbd_dev->parent_overlap) {
2857 rbd_obj_request_submit(orig_request);
2858 return;
2862 * Our only purpose here is to determine whether the object
2863 * exists, and we don't want to treat the non-existence as
2864 * an error. If something else comes back, transfer the
2865 * error to the original request and complete it now.
2868 obj_request_existence_set(orig_request, true);
2869 } else if (result == -ENOENT) {
2870 obj_request_existence_set(orig_request, false);
2872 goto fail_orig_request;
2876 * Resubmit the original request now that we have recorded
2877 * whether the target object exists.
2879 result = rbd_img_obj_request_submit(orig_request);
2881 goto fail_orig_request;
2886 orig_request->result = result;
2887 orig_request->xferred = 0;
2888 rbd_img_request_get(orig_request->img_request);
2889 obj_request_done_set(orig_request);
2890 rbd_obj_request_complete(orig_request);
2893 static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request)
2895 struct rbd_device *rbd_dev = obj_request->img_request->rbd_dev;
2896 struct rbd_obj_request *stat_request;
2897 struct page **pages;
2902 stat_request = rbd_obj_request_create(obj_request->object_name, 0, 0,
2903 OBJ_REQUEST_PAGES);
2904 if (!stat_request)
2905 return -ENOMEM;
2907 stat_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
2908 stat_request);
2909 if (!stat_request->osd_req) {
2910 ret = -ENOMEM;
2911 goto fail_stat_request;
2915 * The response data for a STAT call consists of:
2916 *     le64 length;
2917 *     struct {
2918 *         le32 tv_sec;
2919 *         le32 tv_nsec;
2920 *     } mtime;
2921 */
2922 size = sizeof (__le64) + sizeof (__le32) + sizeof (__le32);
2923 page_count = (u32)calc_pages_for(0, size);
2924 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2925 if (IS_ERR(pages)) {
2926 ret = PTR_ERR(pages);
2927 goto fail_stat_request;
2930 osd_req_op_init(stat_request->osd_req, 0, CEPH_OSD_OP_STAT, 0);
2931 osd_req_op_raw_data_in_pages(stat_request->osd_req, 0, pages, size, 0,
2932 false, false);
2934 rbd_obj_request_get(obj_request);
2935 stat_request->obj_request = obj_request;
2936 stat_request->pages = pages;
2937 stat_request->page_count = page_count;
2938 stat_request->callback = rbd_img_obj_exists_callback;
2940 rbd_osd_req_format_read(stat_request);
2942 rbd_obj_request_submit(stat_request);
2946 rbd_obj_request_put(stat_request);
2950 static bool img_obj_request_simple(struct rbd_obj_request *obj_request)
2952 struct rbd_img_request *img_request = obj_request->img_request;
2953 struct rbd_device *rbd_dev = img_request->rbd_dev;
2956 if (!img_request_write_test(img_request) &&
2957 !img_request_discard_test(img_request))
2958 return true;
2960 /* Non-layered writes */
2961 if (!img_request_layered_test(img_request))
2962 return true;
2965 * Layered writes outside of the parent overlap range don't
2966 * share any data with the parent.
2968 if (!obj_request_overlaps_parent(obj_request))
2969 return true;
2972 * Entire-object layered writes - we will overwrite whatever
2973 * parent data there is anyway.
2975 if (!obj_request->offset &&
2976 obj_request->length == rbd_obj_bytes(&rbd_dev->header))
2977 return true;
2980 * If the object is known to already exist, its parent data has
2981 * already been copied.
2983 if (obj_request_known_test(obj_request) &&
2984 obj_request_exists_test(obj_request))
2985 return true;
2987 return false;
2990 static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request)
2992 rbd_assert(obj_request_img_data_test(obj_request));
2993 rbd_assert(obj_request_type_valid(obj_request->type));
2994 rbd_assert(obj_request->img_request);
2996 if (img_obj_request_simple(obj_request)) {
2997 rbd_obj_request_submit(obj_request);
2998 return 0;
3002 * It's a layered write. The target object might exist but
3003 * we may not know that yet. If we know it doesn't exist,
3004 * start by reading the data for the full target object from
3005 * the parent so we can use it for a copyup to the target.
3007 if (obj_request_known_test(obj_request))
3008 return rbd_img_obj_parent_read_full(obj_request);
3010 /* We don't know whether the target exists. Go find out. */
3012 return rbd_img_obj_exists_submit(obj_request);
3015 static int rbd_img_request_submit(struct rbd_img_request *img_request)
3017 struct rbd_obj_request *obj_request;
3018 struct rbd_obj_request *next_obj_request;
3021 dout("%s: img %p\n", __func__, img_request);
3023 rbd_img_request_get(img_request);
3024 for_each_obj_request_safe(img_request, obj_request, next_obj_request) {
3025 ret = rbd_img_obj_request_submit(obj_request);
3031 rbd_img_request_put(img_request);
3035 static void rbd_img_parent_read_callback(struct rbd_img_request *img_request)
3037 struct rbd_obj_request *obj_request;
3038 struct rbd_device *rbd_dev;
3043 rbd_assert(img_request_child_test(img_request));
3045 /* First get what we need from the image request and release it */
3047 obj_request = img_request->obj_request;
3048 img_xferred = img_request->xferred;
3049 img_result = img_request->result;
3050 rbd_img_request_put(img_request);
3053 * If the overlap has become 0 (most likely because the
3054 * image has been flattened) we need to re-submit the
3055 * original request.
3057 rbd_assert(obj_request);
3058 rbd_assert(obj_request->img_request);
3059 rbd_dev = obj_request->img_request->rbd_dev;
3060 if (!rbd_dev->parent_overlap) {
3061 rbd_obj_request_submit(obj_request);
3062 return;
3065 obj_request->result = img_result;
3066 if (obj_request->result)
3067 goto out;
3070 * We need to zero anything beyond the parent overlap
3071 * boundary. Since rbd_img_obj_request_read_callback()
3072 * will zero anything beyond the end of a short read, an
3073 * easy way to do this is to pretend the data from the
3074 * parent came up short--ending at the overlap boundary.
3076 rbd_assert(obj_request->img_offset < U64_MAX - obj_request->length);
3077 obj_end = obj_request->img_offset + obj_request->length;
3078 if (obj_end > rbd_dev->parent_overlap) {
3081 if (obj_request->img_offset < rbd_dev->parent_overlap)
3082 xferred = rbd_dev->parent_overlap -
3083 obj_request->img_offset;
3085 obj_request->xferred = min(img_xferred, xferred);
3086 } else {
3087 obj_request->xferred = img_xferred;
3088 }
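/*
 * Worked example of the clamping above: with parent_overlap = 6 MiB,
 * an object request at img_offset 5 MiB with length 4 MiB gives
 * obj_end = 9 MiB, so xferred is clamped to 6 MiB - 5 MiB = 1 MiB
 * and the read callback zero-fills the remaining 3 MiB as if the
 * parent had returned a short read.
 */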
3089 out:
3090 rbd_img_obj_request_read_callback(obj_request);
3091 rbd_obj_request_complete(obj_request);
3094 static void rbd_img_parent_read(struct rbd_obj_request *obj_request)
3096 struct rbd_img_request *img_request;
3099 rbd_assert(obj_request_img_data_test(obj_request));
3100 rbd_assert(obj_request->img_request != NULL);
3101 rbd_assert(obj_request->result == (s32) -ENOENT);
3102 rbd_assert(obj_request_type_valid(obj_request->type));
3104 /* rbd_read_finish(obj_request, obj_request->length); */
3105 img_request = rbd_parent_request_create(obj_request,
3106 obj_request->img_offset,
3107 obj_request->length);
3112 if (obj_request->type == OBJ_REQUEST_BIO)
3113 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
3114 obj_request->bio_list);
3115 else
3116 result = rbd_img_request_fill(img_request, OBJ_REQUEST_PAGES,
3117 obj_request->pages);
3121 img_request->callback = rbd_img_parent_read_callback;
3122 result = rbd_img_request_submit(img_request);
3129 rbd_img_request_put(img_request);
3130 obj_request->result = result;
3131 obj_request->xferred = 0;
3132 obj_request_done_set(obj_request);
3135 static const struct rbd_client_id rbd_empty_cid;
3137 static bool rbd_cid_equal(const struct rbd_client_id *lhs,
3138 const struct rbd_client_id *rhs)
3140 return lhs->gid == rhs->gid && lhs->handle == rhs->handle;
3143 static struct rbd_client_id rbd_get_cid(struct rbd_device *rbd_dev)
3145 struct rbd_client_id cid;
3147 mutex_lock(&rbd_dev->watch_mutex);
3148 cid.gid = ceph_client_gid(rbd_dev->rbd_client->client);
3149 cid.handle = rbd_dev->watch_cookie;
3150 mutex_unlock(&rbd_dev->watch_mutex);
3155 * lock_rwsem must be held for write
3157 static void rbd_set_owner_cid(struct rbd_device *rbd_dev,
3158 const struct rbd_client_id *cid)
3160 dout("%s rbd_dev %p %llu-%llu -> %llu-%llu\n", __func__, rbd_dev,
3161 rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle,
3162 cid->gid, cid->handle);
3163 rbd_dev->owner_cid = *cid; /* struct */
3166 static void format_lock_cookie(struct rbd_device *rbd_dev, char *buf)
3168 mutex_lock(&rbd_dev->watch_mutex);
3169 sprintf(buf, "%s %llu", RBD_LOCK_COOKIE_PREFIX, rbd_dev->watch_cookie);
3170 mutex_unlock(&rbd_dev->watch_mutex);
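/*
 * The resulting cookie is "<prefix> <watch_cookie>", e.g.
 * "auto 94769837499" assuming RBD_LOCK_COOKIE_PREFIX is "auto".
 * find_watcher() parses it back with a matching sscanf() format.
 */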
3174 * lock_rwsem must be held for write
3176 static int rbd_lock(struct rbd_device *rbd_dev)
3178 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3179 struct rbd_client_id cid = rbd_get_cid(rbd_dev);
3183 WARN_ON(__rbd_is_lock_owner(rbd_dev));
3185 format_lock_cookie(rbd_dev, cookie);
3186 ret = ceph_cls_lock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
3187 RBD_LOCK_NAME, CEPH_CLS_LOCK_EXCLUSIVE, cookie,
3188 RBD_LOCK_TAG, "", 0);
3192 rbd_dev->lock_state = RBD_LOCK_STATE_LOCKED;
3193 rbd_set_owner_cid(rbd_dev, &cid);
3194 queue_work(rbd_dev->task_wq, &rbd_dev->acquired_lock_work);
3199 * lock_rwsem must be held for write
3201 static int rbd_unlock(struct rbd_device *rbd_dev)
3203 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3207 WARN_ON(!__rbd_is_lock_owner(rbd_dev));
3209 rbd_dev->lock_state = RBD_LOCK_STATE_UNLOCKED;
3211 format_lock_cookie(rbd_dev, cookie);
3212 ret = ceph_cls_unlock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
3213 RBD_LOCK_NAME, cookie);
3214 if (ret && ret != -ENOENT) {
3215 rbd_warn(rbd_dev, "cls_unlock failed: %d", ret);
3219 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
3220 queue_work(rbd_dev->task_wq, &rbd_dev->released_lock_work);
3224 static int __rbd_notify_op_lock(struct rbd_device *rbd_dev,
3225 enum rbd_notify_op notify_op,
3226 struct page ***preply_pages,
3229 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3230 struct rbd_client_id cid = rbd_get_cid(rbd_dev);
3231 int buf_size = 4 + 8 + 8 + CEPH_ENCODING_START_BLK_LEN;
3235 dout("%s rbd_dev %p notify_op %d\n", __func__, rbd_dev, notify_op);
3237 /* encode *LockPayload NotifyMessage (op + ClientId) */
3238 ceph_start_encoding(&p, 2, 1, buf_size - CEPH_ENCODING_START_BLK_LEN);
3239 ceph_encode_32(&p, notify_op);
3240 ceph_encode_64(&p, cid.gid);
3241 ceph_encode_64(&p, cid.handle);
3243 return ceph_osdc_notify(osdc, &rbd_dev->header_oid,
3244 &rbd_dev->header_oloc, buf, buf_size,
3245 RBD_NOTIFY_TIMEOUT, preply_pages, preply_len);
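/*
 * A sketch of the notify payload laid out above, assuming
 * CEPH_ENCODING_START_BLK_LEN covers the u8 struct_v, u8
 * struct_compat and le32 struct_len preamble:
 *
 *	u8   struct_v (2)
 *	u8   struct_compat (1)
 *	le32 struct_len
 *	le32 notify_op
 *	le64 gid
 *	le64 handle
 *
 * which is what the 4 + 8 + 8 + CEPH_ENCODING_START_BLK_LEN buffer
 * size accounts for.
 */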
3248 static void rbd_notify_op_lock(struct rbd_device *rbd_dev,
3249 enum rbd_notify_op notify_op)
3251 struct page **reply_pages;
3254 __rbd_notify_op_lock(rbd_dev, notify_op, &reply_pages, &reply_len);
3255 ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len));
3258 static void rbd_notify_acquired_lock(struct work_struct *work)
3260 struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
3261 acquired_lock_work);
3263 rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_ACQUIRED_LOCK);
3266 static void rbd_notify_released_lock(struct work_struct *work)
3268 struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
3269 released_lock_work);
3271 rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_RELEASED_LOCK);
3274 static int rbd_request_lock(struct rbd_device *rbd_dev)
3276 struct page **reply_pages;
3278 bool lock_owner_responded = false;
3281 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3283 ret = __rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_REQUEST_LOCK,
3284 &reply_pages, &reply_len);
3285 if (ret && ret != -ETIMEDOUT) {
3286 rbd_warn(rbd_dev, "failed to request lock: %d", ret);
3290 if (reply_len > 0 && reply_len <= PAGE_SIZE) {
3291 void *p = page_address(reply_pages[0]);
3292 void *const end = p + reply_len;
3295 ceph_decode_32_safe(&p, end, n, e_inval); /* num_acks */
3300 ceph_decode_need(&p, end, 8 + 8, e_inval);
3301 p += 8 + 8; /* skip gid and cookie */
3303 ceph_decode_32_safe(&p, end, len, e_inval);
3307 if (lock_owner_responded) {
3309 "duplicate lock owners detected");
3314 lock_owner_responded = true;
3315 ret = ceph_start_decoding(&p, end, 1, "ResponseMessage",
3316 &struct_v, &len);
3317 if (ret) {
3318 rbd_warn(rbd_dev,
3319 "failed to decode ResponseMessage: %d",
3320 ret);
3321 goto e_inval;
3324 ret = ceph_decode_32(&p);
3328 if (!lock_owner_responded) {
3329 rbd_warn(rbd_dev, "no lock owners detected");
3334 ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len));
3342 static void wake_requests(struct rbd_device *rbd_dev, bool wake_all)
3344 dout("%s rbd_dev %p wake_all %d\n", __func__, rbd_dev, wake_all);
3346 cancel_delayed_work(&rbd_dev->lock_dwork);
3347 if (wake_all)
3348 wake_up_all(&rbd_dev->lock_waitq);
3349 else
3350 wake_up(&rbd_dev->lock_waitq);
3353 static int get_lock_owner_info(struct rbd_device *rbd_dev,
3354 struct ceph_locker **lockers, u32 *num_lockers)
3356 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3361 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3363 ret = ceph_cls_lock_info(osdc, &rbd_dev->header_oid,
3364 &rbd_dev->header_oloc, RBD_LOCK_NAME,
3365 &lock_type, &lock_tag, lockers, num_lockers);
3369 if (*num_lockers == 0) {
3370 dout("%s rbd_dev %p no lockers detected\n", __func__, rbd_dev);
3374 if (strcmp(lock_tag, RBD_LOCK_TAG)) {
3375 rbd_warn(rbd_dev, "locked by external mechanism, tag %s",
3381 if (lock_type == CEPH_CLS_LOCK_SHARED) {
3382 rbd_warn(rbd_dev, "shared lock type detected");
3387 if (strncmp((*lockers)[0].id.cookie, RBD_LOCK_COOKIE_PREFIX,
3388 strlen(RBD_LOCK_COOKIE_PREFIX))) {
3389 rbd_warn(rbd_dev, "locked by external mechanism, cookie %s",
3390 (*lockers)[0].id.cookie);
3400 static int find_watcher(struct rbd_device *rbd_dev,
3401 const struct ceph_locker *locker)
3403 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3404 struct ceph_watch_item *watchers;
3410 ret = ceph_osdc_list_watchers(osdc, &rbd_dev->header_oid,
3411 &rbd_dev->header_oloc, &watchers,
3416 sscanf(locker->id.cookie, RBD_LOCK_COOKIE_PREFIX " %llu", &cookie);
3417 for (i = 0; i < num_watchers; i++) {
3418 if (!memcmp(&watchers[i].addr, &locker->info.addr,
3419 sizeof(locker->info.addr)) &&
3420 watchers[i].cookie == cookie) {
3421 struct rbd_client_id cid = {
3422 .gid = le64_to_cpu(watchers[i].name.num),
3426 dout("%s rbd_dev %p found cid %llu-%llu\n", __func__,
3427 rbd_dev, cid.gid, cid.handle);
3428 rbd_set_owner_cid(rbd_dev, &cid);
3434 dout("%s rbd_dev %p no watchers\n", __func__, rbd_dev);
3442 * lock_rwsem must be held for write
3444 static int rbd_try_lock(struct rbd_device *rbd_dev)
3446 struct ceph_client *client = rbd_dev->rbd_client->client;
3447 struct ceph_locker *lockers;
3452 ret = rbd_lock(rbd_dev);
3456 /* determine if the current lock holder is still alive */
3457 ret = get_lock_owner_info(rbd_dev, &lockers, &num_lockers);
3461 if (num_lockers == 0)
3462 goto again;
3464 ret = find_watcher(rbd_dev, lockers);
3467 ret = 0; /* have to request lock */
3471 rbd_warn(rbd_dev, "%s%llu seems dead, breaking lock",
3472 ENTITY_NAME(lockers[0].id.name));
3474 ret = ceph_monc_blacklist_add(&client->monc,
3475 &lockers[0].info.addr);
3477 rbd_warn(rbd_dev, "blacklist of %s%llu failed: %d",
3478 ENTITY_NAME(lockers[0].id.name), ret);
3482 ret = ceph_cls_break_lock(&client->osdc, &rbd_dev->header_oid,
3483 &rbd_dev->header_oloc, RBD_LOCK_NAME,
3484 lockers[0].id.cookie,
3485 &lockers[0].id.name);
3486 if (ret && ret != -ENOENT)
3490 ceph_free_lockers(lockers, num_lockers);
3494 ceph_free_lockers(lockers, num_lockers);
3499 * ret is set only if lock_state is RBD_LOCK_STATE_UNLOCKED
3501 static enum rbd_lock_state rbd_try_acquire_lock(struct rbd_device *rbd_dev,
3504 enum rbd_lock_state lock_state;
3506 down_read(&rbd_dev->lock_rwsem);
3507 dout("%s rbd_dev %p read lock_state %d\n", __func__, rbd_dev,
3508 rbd_dev->lock_state);
3509 if (__rbd_is_lock_owner(rbd_dev)) {
3510 lock_state = rbd_dev->lock_state;
3511 up_read(&rbd_dev->lock_rwsem);
3515 up_read(&rbd_dev->lock_rwsem);
3516 down_write(&rbd_dev->lock_rwsem);
3517 dout("%s rbd_dev %p write lock_state %d\n", __func__, rbd_dev,
3518 rbd_dev->lock_state);
3519 if (!__rbd_is_lock_owner(rbd_dev)) {
3520 *pret = rbd_try_lock(rbd_dev);
3522 rbd_warn(rbd_dev, "failed to acquire lock: %d", *pret);
3525 lock_state = rbd_dev->lock_state;
3526 up_write(&rbd_dev->lock_rwsem);
3530 static void rbd_acquire_lock(struct work_struct *work)
3532 struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
3533 struct rbd_device, lock_dwork);
3534 enum rbd_lock_state lock_state;
3537 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3539 lock_state = rbd_try_acquire_lock(rbd_dev, &ret);
3540 if (lock_state != RBD_LOCK_STATE_UNLOCKED || ret == -EBLACKLISTED) {
3541 if (lock_state == RBD_LOCK_STATE_LOCKED)
3542 wake_requests(rbd_dev, true);
3543 dout("%s rbd_dev %p lock_state %d ret %d - done\n", __func__,
3544 rbd_dev, lock_state, ret);
3548 ret = rbd_request_lock(rbd_dev);
3549 if (ret == -ETIMEDOUT) {
3550 goto again; /* treat this as a dead client */
3551 } else if (ret < 0) {
3552 rbd_warn(rbd_dev, "error requesting lock: %d", ret);
3553 mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork,
3554 RBD_RETRY_DELAY);
3557 * lock owner acked, but resend if we don't see them
3558 * release the lock
3560 dout("%s rbd_dev %p requeueing lock_dwork\n", __func__,
3562 mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork,
3563 msecs_to_jiffies(2 * RBD_NOTIFY_TIMEOUT * MSEC_PER_SEC));
3568 * lock_rwsem must be held for write
3570 static bool rbd_release_lock(struct rbd_device *rbd_dev)
3572 dout("%s rbd_dev %p read lock_state %d\n", __func__, rbd_dev,
3573 rbd_dev->lock_state);
3574 if (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED)
3577 rbd_dev->lock_state = RBD_LOCK_STATE_RELEASING;
3578 downgrade_write(&rbd_dev->lock_rwsem);
3580 * Ensure that all in-flight IO is flushed.
3582 * FIXME: ceph_osdc_sync() flushes the entire OSD client, which
3583 * may be shared with other devices.
3585 ceph_osdc_sync(&rbd_dev->rbd_client->client->osdc);
3586 up_read(&rbd_dev->lock_rwsem);
3588 down_write(&rbd_dev->lock_rwsem);
3589 dout("%s rbd_dev %p write lock_state %d\n", __func__, rbd_dev,
3590 rbd_dev->lock_state);
3591 if (rbd_dev->lock_state != RBD_LOCK_STATE_RELEASING)
3594 if (!rbd_unlock(rbd_dev))
3596 * Give others a chance to grab the lock - we would re-acquire
3597 * almost immediately if we got new IO during ceph_osdc_sync()
3598 * otherwise. We need to ack our own notifications, so this
3599 * lock_dwork will be requeued from rbd_wait_state_locked()
3600 * after wake_requests() in rbd_handle_released_lock().
3602 cancel_delayed_work(&rbd_dev->lock_dwork);
3607 static void rbd_release_lock_work(struct work_struct *work)
3609 struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
3612 down_write(&rbd_dev->lock_rwsem);
3613 rbd_release_lock(rbd_dev);
3614 up_write(&rbd_dev->lock_rwsem);
3617 static void rbd_handle_acquired_lock(struct rbd_device *rbd_dev, u8 struct_v,
3620 struct rbd_client_id cid = { 0 };
3622 if (struct_v >= 2) {
3623 cid.gid = ceph_decode_64(p);
3624 cid.handle = ceph_decode_64(p);
3627 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
3629 if (!rbd_cid_equal(&cid, &rbd_empty_cid)) {
3630 down_write(&rbd_dev->lock_rwsem);
3631 if (rbd_cid_equal(&cid, &rbd_dev->owner_cid)) {
3633 * we already know that the remote client is
3634 * the owner
3636 up_write(&rbd_dev->lock_rwsem);
3640 rbd_set_owner_cid(rbd_dev, &cid);
3641 downgrade_write(&rbd_dev->lock_rwsem);
3643 down_read(&rbd_dev->lock_rwsem);
3646 if (!__rbd_is_lock_owner(rbd_dev))
3647 wake_requests(rbd_dev, false);
3648 up_read(&rbd_dev->lock_rwsem);
3651 static void rbd_handle_released_lock(struct rbd_device *rbd_dev, u8 struct_v,
3654 struct rbd_client_id cid = { 0 };
3656 if (struct_v >= 2) {
3657 cid.gid = ceph_decode_64(p);
3658 cid.handle = ceph_decode_64(p);
3661 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
3663 if (!rbd_cid_equal(&cid, &rbd_empty_cid)) {
3664 down_write(&rbd_dev->lock_rwsem);
3665 if (!rbd_cid_equal(&cid, &rbd_dev->owner_cid)) {
3666 dout("%s rbd_dev %p unexpected owner, cid %llu-%llu != owner_cid %llu-%llu\n",
3667 __func__, rbd_dev, cid.gid, cid.handle,
3668 rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle);
3669 up_write(&rbd_dev->lock_rwsem);
3673 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
3674 downgrade_write(&rbd_dev->lock_rwsem);
3676 down_read(&rbd_dev->lock_rwsem);
3679 if (!__rbd_is_lock_owner(rbd_dev))
3680 wake_requests(rbd_dev, false);
3681 up_read(&rbd_dev->lock_rwsem);
3684 static bool rbd_handle_request_lock(struct rbd_device *rbd_dev, u8 struct_v,
3687 struct rbd_client_id my_cid = rbd_get_cid(rbd_dev);
3688 struct rbd_client_id cid = { 0 };
3691 if (struct_v >= 2) {
3692 cid.gid = ceph_decode_64(p);
3693 cid.handle = ceph_decode_64(p);
3696 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
3698 if (rbd_cid_equal(&cid, &my_cid))
3701 down_read(&rbd_dev->lock_rwsem);
3702 need_to_send = __rbd_is_lock_owner(rbd_dev);
3703 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED) {
3704 if (!rbd_cid_equal(&rbd_dev->owner_cid, &rbd_empty_cid)) {
3705 dout("%s rbd_dev %p queueing unlock_work\n", __func__,
3707 queue_work(rbd_dev->task_wq, &rbd_dev->unlock_work);
3710 up_read(&rbd_dev->lock_rwsem);
3711 return need_to_send;
3714 static void __rbd_acknowledge_notify(struct rbd_device *rbd_dev,
3715 u64 notify_id, u64 cookie, s32 *result)
3717 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3718 int buf_size = 4 + CEPH_ENCODING_START_BLK_LEN;
3725 /* encode ResponseMessage */
3726 ceph_start_encoding(&p, 1, 1,
3727 buf_size - CEPH_ENCODING_START_BLK_LEN);
3728 ceph_encode_32(&p, *result);
3733 ret = ceph_osdc_notify_ack(osdc, &rbd_dev->header_oid,
3734 &rbd_dev->header_oloc, notify_id, cookie,
3737 rbd_warn(rbd_dev, "acknowledge_notify failed: %d", ret);
3740 static void rbd_acknowledge_notify(struct rbd_device *rbd_dev, u64 notify_id,
3743 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3744 __rbd_acknowledge_notify(rbd_dev, notify_id, cookie, NULL);
3747 static void rbd_acknowledge_notify_result(struct rbd_device *rbd_dev,
3748 u64 notify_id, u64 cookie, s32 result)
3750 dout("%s rbd_dev %p result %d\n", __func__, rbd_dev, result);
3751 __rbd_acknowledge_notify(rbd_dev, notify_id, cookie, &result);
3754 static void rbd_watch_cb(void *arg, u64 notify_id, u64 cookie,
3755 u64 notifier_id, void *data, size_t data_len)
3757 struct rbd_device *rbd_dev = arg;
3759 void *const end = p + data_len;
3765 dout("%s rbd_dev %p cookie %llu notify_id %llu data_len %zu\n",
3766 __func__, rbd_dev, cookie, notify_id, data_len);
3767 if (data_len) {
3768 ret = ceph_start_decoding(&p, end, 1, "NotifyMessage",
3769 &struct_v, &len);
3770 if (ret) {
3771 rbd_warn(rbd_dev, "failed to decode NotifyMessage: %d",
3772 ret);
3773 return;
3774 }
3776 notify_op = ceph_decode_32(&p);
3777 } else {
3778 /* legacy notification for header updates */
3779 notify_op = RBD_NOTIFY_OP_HEADER_UPDATE;
3780 len = 0;
3781 }
3783 dout("%s rbd_dev %p notify_op %u\n", __func__, rbd_dev, notify_op);
3784 switch (notify_op) {
3785 case RBD_NOTIFY_OP_ACQUIRED_LOCK:
3786 rbd_handle_acquired_lock(rbd_dev, struct_v, &p);
3787 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
3789 case RBD_NOTIFY_OP_RELEASED_LOCK:
3790 rbd_handle_released_lock(rbd_dev, struct_v, &p);
3791 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
3793 case RBD_NOTIFY_OP_REQUEST_LOCK:
3794 if (rbd_handle_request_lock(rbd_dev, struct_v, &p))
3796 * send ResponseMessage(0) back so the client
3797 * can detect a missing owner
3799 rbd_acknowledge_notify_result(rbd_dev, notify_id,
3800 cookie, 0);
3802 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
3804 case RBD_NOTIFY_OP_HEADER_UPDATE:
3805 ret = rbd_dev_refresh(rbd_dev);
3807 rbd_warn(rbd_dev, "refresh failed: %d", ret);
3809 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
3812 if (rbd_is_lock_owner(rbd_dev))
3813 rbd_acknowledge_notify_result(rbd_dev, notify_id,
3814 cookie, -EOPNOTSUPP);
3816 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
3821 static void __rbd_unregister_watch(struct rbd_device *rbd_dev);
3823 static void rbd_watch_errcb(void *arg, u64 cookie, int err)
3825 struct rbd_device *rbd_dev = arg;
3827 rbd_warn(rbd_dev, "encountered watch error: %d", err);
3829 down_write(&rbd_dev->lock_rwsem);
3830 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
3831 up_write(&rbd_dev->lock_rwsem);
3833 mutex_lock(&rbd_dev->watch_mutex);
3834 if (rbd_dev->watch_state == RBD_WATCH_STATE_REGISTERED) {
3835 __rbd_unregister_watch(rbd_dev);
3836 rbd_dev->watch_state = RBD_WATCH_STATE_ERROR;
3838 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->watch_dwork, 0);
3840 mutex_unlock(&rbd_dev->watch_mutex);
3844 * watch_mutex must be locked
3846 static int __rbd_register_watch(struct rbd_device *rbd_dev)
3848 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3849 struct ceph_osd_linger_request *handle;
3851 rbd_assert(!rbd_dev->watch_handle);
3852 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3854 handle = ceph_osdc_watch(osdc, &rbd_dev->header_oid,
3855 &rbd_dev->header_oloc, rbd_watch_cb,
3856 rbd_watch_errcb, rbd_dev);
3858 return PTR_ERR(handle);
3860 rbd_dev->watch_handle = handle;
3865 * watch_mutex must be locked
3867 static void __rbd_unregister_watch(struct rbd_device *rbd_dev)
3869 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3872 rbd_assert(rbd_dev->watch_handle);
3873 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3875 ret = ceph_osdc_unwatch(osdc, rbd_dev->watch_handle);
3877 rbd_warn(rbd_dev, "failed to unwatch: %d", ret);
3879 rbd_dev->watch_handle = NULL;
3882 static int rbd_register_watch(struct rbd_device *rbd_dev)
3886 mutex_lock(&rbd_dev->watch_mutex);
3887 rbd_assert(rbd_dev->watch_state == RBD_WATCH_STATE_UNREGISTERED);
3888 ret = __rbd_register_watch(rbd_dev);
3892 rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED;
3893 rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id;
3896 mutex_unlock(&rbd_dev->watch_mutex);
3900 static void cancel_tasks_sync(struct rbd_device *rbd_dev)
3902 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3904 cancel_delayed_work_sync(&rbd_dev->watch_dwork);
3905 cancel_work_sync(&rbd_dev->acquired_lock_work);
3906 cancel_work_sync(&rbd_dev->released_lock_work);
3907 cancel_delayed_work_sync(&rbd_dev->lock_dwork);
3908 cancel_work_sync(&rbd_dev->unlock_work);
3911 static void rbd_unregister_watch(struct rbd_device *rbd_dev)
3913 WARN_ON(waitqueue_active(&rbd_dev->lock_waitq));
3914 cancel_tasks_sync(rbd_dev);
3916 mutex_lock(&rbd_dev->watch_mutex);
3917 if (rbd_dev->watch_state == RBD_WATCH_STATE_REGISTERED)
3918 __rbd_unregister_watch(rbd_dev);
3919 rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED;
3920 mutex_unlock(&rbd_dev->watch_mutex);
3922 ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
3925 static void rbd_reregister_watch(struct work_struct *work)
3927 struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
3928 struct rbd_device, watch_dwork);
3929 bool was_lock_owner = false;
3932 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3934 down_write(&rbd_dev->lock_rwsem);
3935 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED)
3936 was_lock_owner = rbd_release_lock(rbd_dev);
3938 mutex_lock(&rbd_dev->watch_mutex);
3939 if (rbd_dev->watch_state != RBD_WATCH_STATE_ERROR)
3940 goto fail_unlock;
3942 ret = __rbd_register_watch(rbd_dev);
3944 rbd_warn(rbd_dev, "failed to reregister watch: %d", ret);
3945 if (ret != -EBLACKLISTED)
3946 queue_delayed_work(rbd_dev->task_wq,
3947 &rbd_dev->watch_dwork,
3948 RBD_RETRY_DELAY);
3949 goto fail_unlock;
3952 rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED;
3953 rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id;
3954 mutex_unlock(&rbd_dev->watch_mutex);
3956 ret = rbd_dev_refresh(rbd_dev);
3958 rbd_warn(rbd_dev, "reregistration refresh failed: %d", ret);
3960 if (was_lock_owner) {
3961 ret = rbd_try_lock(rbd_dev);
3963 rbd_warn(rbd_dev, "reregistration lock failed: %d",
3967 up_write(&rbd_dev->lock_rwsem);
3968 wake_requests(rbd_dev, true);
3971 fail_unlock:
3972 mutex_unlock(&rbd_dev->watch_mutex);
3973 up_write(&rbd_dev->lock_rwsem);
3977 * Synchronous osd object method call. Returns the number of bytes
3978 * returned in the inbound buffer, or a negative error code.
3980 static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
3981 const char *object_name,
3982 const char *class_name,
3983 const char *method_name,
3984 const void *outbound,
3985 size_t outbound_size,
3986 void *inbound,
3987 size_t inbound_size)
3989 struct rbd_obj_request *obj_request;
3990 struct page **pages;
3995 * Method calls are ultimately read operations. The result
3996 * should be placed into the inbound buffer provided. They
3997 * also supply outbound data--parameters for the object
3998 * method. Currently if this is present it will be a
3999 * snapshot id.
4001 page_count = (u32)calc_pages_for(0, inbound_size);
4002 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
4004 return PTR_ERR(pages);
4007 obj_request = rbd_obj_request_create(object_name, 0, inbound_size,
4008 OBJ_REQUEST_PAGES);
4009 if (!obj_request)
4010 goto out;
4012 obj_request->pages = pages;
4013 obj_request->page_count = page_count;
4015 obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
4016 obj_request);
4017 if (!obj_request->osd_req)
4018 goto out;
4020 osd_req_op_cls_init(obj_request->osd_req, 0, CEPH_OSD_OP_CALL,
4021 class_name, method_name);
4022 if (outbound_size) {
4023 struct ceph_pagelist *pagelist;
4025 pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
4029 ceph_pagelist_init(pagelist);
4030 ceph_pagelist_append(pagelist, outbound, outbound_size);
4031 osd_req_op_cls_request_data_pagelist(obj_request->osd_req, 0,
4032 pagelist);
4034 osd_req_op_cls_response_data_pages(obj_request->osd_req, 0,
4035 obj_request->pages, inbound_size,
4036 0, false, false);
4037 rbd_osd_req_format_read(obj_request);
4039 rbd_obj_request_submit(obj_request);
4040 ret = rbd_obj_request_wait(obj_request);
4044 ret = obj_request->result;
4048 rbd_assert(obj_request->xferred < (u64)INT_MAX);
4049 ret = (int)obj_request->xferred;
4050 ceph_copy_from_page_vector(pages, inbound, 0, obj_request->xferred);
4053 rbd_obj_request_put(obj_request);
4055 ceph_release_page_vector(pages, page_count);
4061 * lock_rwsem must be held for read
4063 static void rbd_wait_state_locked(struct rbd_device *rbd_dev)
4069 * Note the use of mod_delayed_work() in rbd_acquire_lock()
4070 * and cancel_delayed_work() in wake_requests().
4072 dout("%s rbd_dev %p queueing lock_dwork\n", __func__, rbd_dev);
4073 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
4074 prepare_to_wait_exclusive(&rbd_dev->lock_waitq, &wait,
4075 TASK_UNINTERRUPTIBLE);
4076 up_read(&rbd_dev->lock_rwsem);
4078 down_read(&rbd_dev->lock_rwsem);
4079 } while (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED);
4080 finish_wait(&rbd_dev->lock_waitq, &wait);
4083 static void rbd_queue_workfn(struct work_struct *work)
4085 struct request *rq = blk_mq_rq_from_pdu(work);
4086 struct rbd_device *rbd_dev = rq->q->queuedata;
4087 struct rbd_img_request *img_request;
4088 struct ceph_snap_context *snapc = NULL;
4089 u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT;
4090 u64 length = blk_rq_bytes(rq);
4091 enum obj_operation_type op_type;
4093 bool must_be_locked;
4096 if (rq->cmd_type != REQ_TYPE_FS) {
4097 dout("%s: non-fs request type %d\n", __func__,
4098 (int) rq->cmd_type);
4103 if (req_op(rq) == REQ_OP_DISCARD)
4104 op_type = OBJ_OP_DISCARD;
4105 else if (req_op(rq) == REQ_OP_WRITE)
4106 op_type = OBJ_OP_WRITE;
4107 else
4108 op_type = OBJ_OP_READ;
4110 /* Ignore/skip any zero-length requests */
4113 dout("%s: zero-length request\n", __func__);
4118 /* Only reads are allowed to a read-only device */
4120 if (op_type != OBJ_OP_READ) {
4121 if (rbd_dev->mapping.read_only) {
4125 rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP);
4129 * Quit early if the mapped snapshot no longer exists. It's
4130 * still possible the snapshot will have disappeared by the
4131 * time our request arrives at the osd, but there's no sense in
4132 * sending it if we already know.
4134 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
4135 dout("request for non-existent snapshot");
4136 rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
4141 if (offset && length > U64_MAX - offset + 1) {
4142 rbd_warn(rbd_dev, "bad request range (%llu~%llu)", offset,
4145 goto err_rq; /* Shouldn't happen */
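/*
 * The range check above rejects requests that would wrap around the
 * u64 offset space: for a non-zero offset, U64_MAX - offset + 1 is
 * the largest length that still fits.  E.g. with offset = U64_MAX - 3
 * a length of 4 reaches exactly the end of the space, while a length
 * of 5 would wrap and is refused.
 */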
4148 blk_mq_start_request(rq);
4150 down_read(&rbd_dev->header_rwsem);
4151 mapping_size = rbd_dev->mapping.size;
4152 if (op_type != OBJ_OP_READ) {
4153 snapc = rbd_dev->header.snapc;
4154 ceph_get_snap_context(snapc);
4155 must_be_locked = rbd_is_lock_supported(rbd_dev);
4157 must_be_locked = rbd_dev->opts->lock_on_read &&
4158 rbd_is_lock_supported(rbd_dev);
4160 up_read(&rbd_dev->header_rwsem);
4162 if (offset + length > mapping_size) {
4163 rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)", offset,
4164 length, mapping_size);
4169 if (must_be_locked) {
4170 down_read(&rbd_dev->lock_rwsem);
4171 if (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED)
4172 rbd_wait_state_locked(rbd_dev);
4175 img_request = rbd_img_request_create(rbd_dev, offset, length, op_type,
4181 img_request->rq = rq;
4182 snapc = NULL; /* img_request consumes a ref */
4184 if (op_type == OBJ_OP_DISCARD)
4185 result = rbd_img_request_fill(img_request, OBJ_REQUEST_NODATA,
4186 NULL);
4187 else
4188 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
4189 rq->bio);
4190 if (result)
4191 goto err_img_request;
4193 result = rbd_img_request_submit(img_request);
4194 if (result)
4195 goto err_img_request;
4198 up_read(&rbd_dev->lock_rwsem);
4202 rbd_img_request_put(img_request);
4205 up_read(&rbd_dev->lock_rwsem);
4208 rbd_warn(rbd_dev, "%s %llx at %llx result %d",
4209 obj_op_name(op_type), length, offset, result);
4210 ceph_put_snap_context(snapc);
4212 blk_mq_end_request(rq, result);
4215 static int rbd_queue_rq(struct blk_mq_hw_ctx *hctx,
4216 const struct blk_mq_queue_data *bd)
4218 struct request *rq = bd->rq;
4219 struct work_struct *work = blk_mq_rq_to_pdu(rq);
4221 queue_work(rbd_wq, work);
4222 return BLK_MQ_RQ_QUEUE_OK;
4225 static void rbd_free_disk(struct rbd_device *rbd_dev)
4227 struct gendisk *disk = rbd_dev->disk;
4232 rbd_dev->disk = NULL;
4233 if (disk->flags & GENHD_FL_UP) {
4236 blk_cleanup_queue(disk->queue);
4237 blk_mq_free_tag_set(&rbd_dev->tag_set);
4242 static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
4243 const char *object_name,
4244 u64 offset, u64 length, void *buf)
4247 struct rbd_obj_request *obj_request;
4248 struct page **pages = NULL;
4253 page_count = (u32) calc_pages_for(offset, length);
4254 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
4256 return PTR_ERR(pages);
4259 obj_request = rbd_obj_request_create(object_name, offset, length,
4260 OBJ_REQUEST_PAGES);
4261 if (!obj_request)
4262 goto out;
4264 obj_request->pages = pages;
4265 obj_request->page_count = page_count;
4267 obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
4268 obj_request);
4269 if (!obj_request->osd_req)
4270 goto out;
4272 osd_req_op_extent_init(obj_request->osd_req, 0, CEPH_OSD_OP_READ,
4273 offset, length, 0, 0);
4274 osd_req_op_extent_osd_data_pages(obj_request->osd_req, 0,
4275 obj_request->pages,
4276 obj_request->length,
4277 obj_request->offset & ~PAGE_MASK,
4278 false, false);
4279 rbd_osd_req_format_read(obj_request);
4281 rbd_obj_request_submit(obj_request);
4282 ret = rbd_obj_request_wait(obj_request);
4286 ret = obj_request->result;
4290 rbd_assert(obj_request->xferred <= (u64) SIZE_MAX);
4291 size = (size_t) obj_request->xferred;
4292 ceph_copy_from_page_vector(pages, buf, 0, size);
4293 rbd_assert(size <= (size_t)INT_MAX);
4297 rbd_obj_request_put(obj_request);
4299 ceph_release_page_vector(pages, page_count);
4305 * Read the complete header for the given rbd device. On successful
4306 * return, the rbd_dev->header field will contain up-to-date
4307 * information about the image.
4309 static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
4311 struct rbd_image_header_ondisk *ondisk = NULL;
4318 * The complete header will include an array of its 64-bit
4319 * snapshot ids, followed by the names of those snapshots as
4320 * a contiguous block of NUL-terminated strings. Note that
4321 * the number of snapshots could change by the time we read
4322 * it in, in which case we re-read it.
4329 size = sizeof (*ondisk);
4330 size += snap_count * sizeof (struct rbd_image_snap_ondisk);
4332 ondisk = kmalloc(size, GFP_KERNEL);
4336 ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_oid.name,
4340 if ((size_t)ret < size) {
4342 rbd_warn(rbd_dev, "short header read (want %zd got %d)",
4346 if (!rbd_dev_ondisk_valid(ondisk)) {
4348 rbd_warn(rbd_dev, "invalid header");
4352 names_size = le64_to_cpu(ondisk->snap_names_len);
4353 want_count = snap_count;
4354 snap_count = le32_to_cpu(ondisk->snap_count);
4355 } while (snap_count != want_count);
4357 ret = rbd_header_from_disk(rbd_dev, ondisk);
4365 * Clear the rbd device's EXISTS flag if the snapshot it's mapped to
4366 * has disappeared from the (just updated) snapshot context.
4368 static void rbd_exists_validate(struct rbd_device *rbd_dev)
4372 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags))
4375 snap_id = rbd_dev->spec->snap_id;
4376 if (snap_id == CEPH_NOSNAP)
4379 if (rbd_dev_snap_index(rbd_dev, snap_id) == BAD_SNAP_INDEX)
4380 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
4383 static void rbd_dev_update_size(struct rbd_device *rbd_dev)
4388 * If EXISTS is not set, rbd_dev->disk may be NULL, so don't
4389 * try to update its size. If REMOVING is set, updating size
4390 * is just useless work since the device can't be opened.
4392 if (test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags) &&
4393 !test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags)) {
4394 size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
4395 dout("setting size to %llu sectors", (unsigned long long)size);
4396 set_capacity(rbd_dev->disk, size);
4397 revalidate_disk(rbd_dev->disk);
4401 static int rbd_dev_refresh(struct rbd_device *rbd_dev)
4406 down_write(&rbd_dev->header_rwsem);
4407 mapping_size = rbd_dev->mapping.size;
4409 ret = rbd_dev_header_info(rbd_dev);
4414 * If there is a parent, see if it has disappeared due to the
4415 * mapped image getting flattened.
4417 if (rbd_dev->parent) {
4418 ret = rbd_dev_v2_parent_info(rbd_dev);
4423 if (rbd_dev->spec->snap_id == CEPH_NOSNAP) {
4424 rbd_dev->mapping.size = rbd_dev->header.image_size;
4426 /* validate mapped snapshot's EXISTS flag */
4427 rbd_exists_validate(rbd_dev);
4431 up_write(&rbd_dev->header_rwsem);
4432 if (!ret && mapping_size != rbd_dev->mapping.size)
4433 rbd_dev_update_size(rbd_dev);
4438 static int rbd_init_request(void *data, struct request *rq,
4439 unsigned int hctx_idx, unsigned int request_idx,
4440 unsigned int numa_node)
4442 struct work_struct *work = blk_mq_rq_to_pdu(rq);
4444 INIT_WORK(work, rbd_queue_workfn);
4448 static struct blk_mq_ops rbd_mq_ops = {
4449 .queue_rq = rbd_queue_rq,
4450 .map_queue = blk_mq_map_queue,
4451 .init_request = rbd_init_request,
4454 static int rbd_init_disk(struct rbd_device *rbd_dev)
4456 struct gendisk *disk;
4457 struct request_queue *q;
4461 /* create gendisk info */
4462 disk = alloc_disk(single_major ?
4463 (1 << RBD_SINGLE_MAJOR_PART_SHIFT) :
4464 RBD_MINORS_PER_MAJOR);
4468 snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
4470 disk->major = rbd_dev->major;
4471 disk->first_minor = rbd_dev->minor;
4473 disk->flags |= GENHD_FL_EXT_DEVT;
4474 disk->fops = &rbd_bd_ops;
4475 disk->private_data = rbd_dev;
4477 memset(&rbd_dev->tag_set, 0, sizeof(rbd_dev->tag_set));
4478 rbd_dev->tag_set.ops = &rbd_mq_ops;
4479 rbd_dev->tag_set.queue_depth = rbd_dev->opts->queue_depth;
4480 rbd_dev->tag_set.numa_node = NUMA_NO_NODE;
4481 rbd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
4482 rbd_dev->tag_set.nr_hw_queues = 1;
4483 rbd_dev->tag_set.cmd_size = sizeof(struct work_struct);
4485 err = blk_mq_alloc_tag_set(&rbd_dev->tag_set);
4489 q = blk_mq_init_queue(&rbd_dev->tag_set);
4495 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
4496 /* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */
4498 /* set io sizes to object size */
4499 segment_size = rbd_obj_bytes(&rbd_dev->header);
4500 blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
4501 q->limits.max_sectors = queue_max_hw_sectors(q);
4502 blk_queue_max_segments(q, segment_size / SECTOR_SIZE);
4503 blk_queue_max_segment_size(q, segment_size);
4504 blk_queue_io_min(q, segment_size);
4505 blk_queue_io_opt(q, segment_size);
4507 /* enable the discard support */
4508 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
4509 q->limits.discard_granularity = segment_size;
4510 q->limits.discard_alignment = segment_size;
4511 blk_queue_max_discard_sectors(q, segment_size / SECTOR_SIZE);
4512 q->limits.discard_zeroes_data = 1;
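/*
 * Advertising the object size as the discard granularity encourages
 * whole-object discards, which rbd_img_obj_request_fill() can turn
 * into CEPH_OSD_OP_DELETE; smaller ranges fall back to the truncate
 * or zero ops chosen there.
 */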
4514 if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
4515 q->backing_dev_info.capabilities |= BDI_CAP_STABLE_WRITES;
4519 q->queuedata = rbd_dev;
4521 rbd_dev->disk = disk;
4525 blk_mq_free_tag_set(&rbd_dev->tag_set);
4535 static struct rbd_device *dev_to_rbd_dev(struct device *dev)
4537 return container_of(dev, struct rbd_device, dev);
4540 static ssize_t rbd_size_show(struct device *dev,
4541 struct device_attribute *attr, char *buf)
4543 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4545 return sprintf(buf, "%llu\n",
4546 (unsigned long long)rbd_dev->mapping.size);
4550 * Note this shows the features for whatever's mapped, which is not
4551 * necessarily the base image.
4553 static ssize_t rbd_features_show(struct device *dev,
4554 struct device_attribute *attr, char *buf)
4556 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4558 return sprintf(buf, "0x%016llx\n",
4559 (unsigned long long)rbd_dev->mapping.features);
4562 static ssize_t rbd_major_show(struct device *dev,
4563 struct device_attribute *attr, char *buf)
4565 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4567 if (rbd_dev->major)
4568 return sprintf(buf, "%d\n", rbd_dev->major);
4570 return sprintf(buf, "(none)\n");
4573 static ssize_t rbd_minor_show(struct device *dev,
4574 struct device_attribute *attr, char *buf)
4576 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4578 return sprintf(buf, "%d\n", rbd_dev->minor);
4581 static ssize_t rbd_client_addr_show(struct device *dev,
4582 struct device_attribute *attr, char *buf)
4584 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4585 struct ceph_entity_addr *client_addr =
4586 ceph_client_addr(rbd_dev->rbd_client->client);
4588 return sprintf(buf, "%pISpc/%u\n", &client_addr->in_addr,
4589 le32_to_cpu(client_addr->nonce));
4592 static ssize_t rbd_client_id_show(struct device *dev,
4593 struct device_attribute *attr, char *buf)
4595 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4597 return sprintf(buf, "client%lld\n",
4598 ceph_client_gid(rbd_dev->rbd_client->client));
4601 static ssize_t rbd_cluster_fsid_show(struct device *dev,
4602 struct device_attribute *attr, char *buf)
4604 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4606 return sprintf(buf, "%pU\n", &rbd_dev->rbd_client->client->fsid);
4609 static ssize_t rbd_config_info_show(struct device *dev,
4610 struct device_attribute *attr, char *buf)
4612 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4614 return sprintf(buf, "%s\n", rbd_dev->config_info);
4617 static ssize_t rbd_pool_show(struct device *dev,
4618 struct device_attribute *attr, char *buf)
4620 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4622 return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
4625 static ssize_t rbd_pool_id_show(struct device *dev,
4626 struct device_attribute *attr, char *buf)
4628 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4630 return sprintf(buf, "%llu\n",
4631 (unsigned long long) rbd_dev->spec->pool_id);
4634 static ssize_t rbd_name_show(struct device *dev,
4635 struct device_attribute *attr, char *buf)
4637 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4639 if (rbd_dev->spec->image_name)
4640 return sprintf(buf, "%s\n", rbd_dev->spec->image_name);
4642 return sprintf(buf, "(unknown)\n");
4645 static ssize_t rbd_image_id_show(struct device *dev,
4646 struct device_attribute *attr, char *buf)
4648 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4650 return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
4654 * Shows the name of the currently-mapped snapshot (or
4655 * RBD_SNAP_HEAD_NAME for the base image).
4657 static ssize_t rbd_snap_show(struct device *dev,
4658 struct device_attribute *attr,
4661 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4663 return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
4666 static ssize_t rbd_snap_id_show(struct device *dev,
4667 struct device_attribute *attr, char *buf)
4669 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4671 return sprintf(buf, "%llu\n", rbd_dev->spec->snap_id);
4675 * For a v2 image, shows the chain of parent images, separated by empty
4676 * lines. For v1 images or if there is no parent, shows "(no parent image)".
4679 static ssize_t rbd_parent_show(struct device *dev,
4680 struct device_attribute *attr,
4683 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4686 if (!rbd_dev->parent)
4687 return sprintf(buf, "(no parent image)\n");
4689 for ( ; rbd_dev->parent; rbd_dev = rbd_dev->parent) {
4690 struct rbd_spec *spec = rbd_dev->parent_spec;
4692 count += sprintf(&buf[count], "%s"
4693 "pool_id %llu\npool_name %s\n"
4694 "image_id %s\nimage_name %s\n"
4695 "snap_id %llu\nsnap_name %s\n"
4696 "overlap %llu\n",
4697 !count ? "" : "\n", /* first? */
4698 spec->pool_id, spec->pool_name,
4699 spec->image_id, spec->image_name ?: "(unknown)",
4700 spec->snap_id, spec->snap_name,
4701 rbd_dev->parent_overlap);
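/*
 * Hypothetical output for one level of the chain (all values invented):
 *
 *	pool_id 2
 *	pool_name rbd
 *	image_id 10074b0dc51
 *	image_name parent-img
 *	snap_id 4
 *	snap_name base
 *	overlap 4194304
 *
 * Each further ancestor repeats these fields after a blank line.
 */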
4707 static ssize_t rbd_image_refresh(struct device *dev,
4708 struct device_attribute *attr,
4712 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4715 ret = rbd_dev_refresh(rbd_dev);
4722 static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
4723 static DEVICE_ATTR(features, S_IRUGO, rbd_features_show, NULL);
4724 static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
4725 static DEVICE_ATTR(minor, S_IRUGO, rbd_minor_show, NULL);
4726 static DEVICE_ATTR(client_addr, S_IRUGO, rbd_client_addr_show, NULL);
4727 static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
4728 static DEVICE_ATTR(cluster_fsid, S_IRUGO, rbd_cluster_fsid_show, NULL);
4729 static DEVICE_ATTR(config_info, S_IRUSR, rbd_config_info_show, NULL);
4730 static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
4731 static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL);
4732 static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
4733 static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL);
4734 static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
4735 static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
4736 static DEVICE_ATTR(snap_id, S_IRUGO, rbd_snap_id_show, NULL);
4737 static DEVICE_ATTR(parent, S_IRUGO, rbd_parent_show, NULL);
4739 static struct attribute *rbd_attrs[] = {
4740 &dev_attr_size.attr,
4741 &dev_attr_features.attr,
4742 &dev_attr_major.attr,
4743 &dev_attr_minor.attr,
4744 &dev_attr_client_addr.attr,
4745 &dev_attr_client_id.attr,
4746 &dev_attr_cluster_fsid.attr,
4747 &dev_attr_config_info.attr,
4748 &dev_attr_pool.attr,
4749 &dev_attr_pool_id.attr,
4750 &dev_attr_name.attr,
4751 &dev_attr_image_id.attr,
4752 &dev_attr_current_snap.attr,
4753 &dev_attr_snap_id.attr,
4754 &dev_attr_parent.attr,
4755 &dev_attr_refresh.attr,
4759 static struct attribute_group rbd_attr_group = {
4763 static const struct attribute_group *rbd_attr_groups[] = {
4768 static void rbd_dev_release(struct device *dev);
4770 static struct device_type rbd_device_type = {
4772 .groups = rbd_attr_groups,
4773 .release = rbd_dev_release,
4776 static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
4778 kref_get(&spec->kref);
4783 static void rbd_spec_free(struct kref *kref);
4784 static void rbd_spec_put(struct rbd_spec *spec)
4787 kref_put(&spec->kref, rbd_spec_free);
4790 static struct rbd_spec *rbd_spec_alloc(void)
4792 struct rbd_spec *spec;
4794 spec = kzalloc(sizeof (*spec), GFP_KERNEL);
4798 spec->pool_id = CEPH_NOPOOL;
4799 spec->snap_id = CEPH_NOSNAP;
4800 kref_init(&spec->kref);
4805 static void rbd_spec_free(struct kref *kref)
4807 struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);
4809 kfree(spec->pool_name);
4810 kfree(spec->image_id);
4811 kfree(spec->image_name);
4812 kfree(spec->snap_name);
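/*
 * Typical rbd_spec lifecycle, as a sketch (error handling omitted):
 *
 *	struct rbd_spec *spec = rbd_spec_alloc();
 *	// ... fill in pool/image/snap ids and names ...
 *	rbd_spec_get(spec);	// extra ref, e.g. shared with a child device
 *	rbd_spec_put(spec);	// drop the extra ref
 *	rbd_spec_put(spec);	// final put frees the names via rbd_spec_free()
 */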
4816 static void rbd_dev_free(struct rbd_device *rbd_dev)
4818 WARN_ON(rbd_dev->watch_state != RBD_WATCH_STATE_UNREGISTERED);
4819 WARN_ON(rbd_dev->lock_state != RBD_LOCK_STATE_UNLOCKED);
4821 ceph_oid_destroy(&rbd_dev->header_oid);
4822 ceph_oloc_destroy(&rbd_dev->header_oloc);
4823 kfree(rbd_dev->config_info);
4825 rbd_put_client(rbd_dev->rbd_client);
4826 rbd_spec_put(rbd_dev->spec);
4827 kfree(rbd_dev->opts);
4831 static void rbd_dev_release(struct device *dev)
4833 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4834 bool need_put = !!rbd_dev->opts;
4837 destroy_workqueue(rbd_dev->task_wq);
4838 ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
4841 rbd_dev_free(rbd_dev);
4844 * This is racy, but way better than dropping the module reference
4845 * outside of the release callback. The race window is pretty small,
4846 * so doing something similar to dm (dm-builtin.c) is overkill.
4849 module_put(THIS_MODULE);
4852 static struct rbd_device *__rbd_dev_create(struct rbd_client *rbdc,
4853 struct rbd_spec *spec)
4855 struct rbd_device *rbd_dev;
4857 rbd_dev = kzalloc(sizeof(*rbd_dev), GFP_KERNEL);
4861 spin_lock_init(&rbd_dev->lock);
4862 INIT_LIST_HEAD(&rbd_dev->node);
4863 init_rwsem(&rbd_dev->header_rwsem);
4865 ceph_oid_init(&rbd_dev->header_oid);
4866 ceph_oloc_init(&rbd_dev->header_oloc);
4868 mutex_init(&rbd_dev->watch_mutex);
4869 rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED;
4870 INIT_DELAYED_WORK(&rbd_dev->watch_dwork, rbd_reregister_watch);
4872 init_rwsem(&rbd_dev->lock_rwsem);
4873 rbd_dev->lock_state = RBD_LOCK_STATE_UNLOCKED;
4874 INIT_WORK(&rbd_dev->acquired_lock_work, rbd_notify_acquired_lock);
4875 INIT_WORK(&rbd_dev->released_lock_work, rbd_notify_released_lock);
4876 INIT_DELAYED_WORK(&rbd_dev->lock_dwork, rbd_acquire_lock);
4877 INIT_WORK(&rbd_dev->unlock_work, rbd_release_lock_work);
4878 init_waitqueue_head(&rbd_dev->lock_waitq);
4880 rbd_dev->dev.bus = &rbd_bus_type;
4881 rbd_dev->dev.type = &rbd_device_type;
4882 rbd_dev->dev.parent = &rbd_root_dev;
4883 device_initialize(&rbd_dev->dev);
4885 rbd_dev->rbd_client = rbdc;
4886 rbd_dev->spec = spec;
4888 rbd_dev->layout.stripe_unit = 1 << RBD_MAX_OBJ_ORDER;
4889 rbd_dev->layout.stripe_count = 1;
4890 rbd_dev->layout.object_size = 1 << RBD_MAX_OBJ_ORDER;
4891 rbd_dev->layout.pool_id = spec->pool_id;
4892 RCU_INIT_POINTER(rbd_dev->layout.pool_ns, NULL);
4898 * Create a mapping rbd_dev, i.e. one for an image that is being mapped
4899 * (as opposed to a parent device, created with __rbd_dev_create() alone).
4900 static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
4901 struct rbd_spec *spec,
4902 struct rbd_options *opts)
4904 struct rbd_device *rbd_dev;
4906 rbd_dev = __rbd_dev_create(rbdc, spec);
4910 rbd_dev->opts = opts;
4912 /* get an id and fill in device name */
4913 rbd_dev->dev_id = ida_simple_get(&rbd_dev_id_ida, 0,
4914 minor_to_rbd_dev_id(1 << MINORBITS),
4916 if (rbd_dev->dev_id < 0)
4919 sprintf(rbd_dev->name, RBD_DRV_NAME "%d", rbd_dev->dev_id);
4920 rbd_dev->task_wq = alloc_ordered_workqueue("%s-tasks", WQ_MEM_RECLAIM,
4922 if (!rbd_dev->task_wq)
4925 /* we have a ref from do_rbd_add() */
4926 __module_get(THIS_MODULE);
4928 dout("%s rbd_dev %p dev_id %d\n", __func__, rbd_dev, rbd_dev->dev_id);
4932 ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
4934 rbd_dev_free(rbd_dev);
4938 static void rbd_dev_destroy(struct rbd_device *rbd_dev)
4941 put_device(&rbd_dev->dev);
4945 * Get the size and object order for an image snapshot, or, if
4946 * snap_id is CEPH_NOSNAP, get this information for the base image.
4949 static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
4950 u8 *order, u64 *snap_size)
4952 __le64 snapid = cpu_to_le64(snap_id);
4957 } __attribute__ ((packed)) size_buf = { 0 };
4959 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
4961 &snapid, sizeof (snapid),
4962 &size_buf, sizeof (size_buf));
4963 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4966 if (ret < sizeof (size_buf))
4970 *order = size_buf.order;
4971 dout(" order %u", (unsigned int)*order);
4973 *snap_size = le64_to_cpu(size_buf.size);
4975 dout(" snap_id 0x%016llx snap_size = %llu\n",
4976 (unsigned long long)snap_id,
4977 (unsigned long long)*snap_size);
4982 static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
4984 return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
4985 &rbd_dev->header.obj_order,
4986 &rbd_dev->header.image_size);
4989 static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
4995 reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL);
4999 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
5000 "rbd", "get_object_prefix", NULL, 0,
5001 reply_buf, RBD_OBJ_PREFIX_LEN_MAX);
5002 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5007 rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
5008 p + ret, NULL, GFP_NOIO);
5011 if (IS_ERR(rbd_dev->header.object_prefix)) {
5012 ret = PTR_ERR(rbd_dev->header.object_prefix);
5013 rbd_dev->header.object_prefix = NULL;
5015 dout(" object_prefix = %s\n", rbd_dev->header.object_prefix);
5023 static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
5026 __le64 snapid = cpu_to_le64(snap_id);
5030 } __attribute__ ((packed)) features_buf = { 0 };
5034 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
5035 "rbd", "get_features",
5036 &snapid, sizeof (snapid),
5037 &features_buf, sizeof (features_buf));
5038 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5041 if (ret < sizeof (features_buf))
5044 unsup = le64_to_cpu(features_buf.incompat) & ~RBD_FEATURES_SUPPORTED;
5046 rbd_warn(rbd_dev, "image uses unsupported features: 0x%llx",
5051 *snap_features = le64_to_cpu(features_buf.features);
5053 dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
5054 (unsigned long long)snap_id,
5055 (unsigned long long)*snap_features,
5056 (unsigned long long)le64_to_cpu(features_buf.incompat));
5061 static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
5063 return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
5064 &rbd_dev->header.features);
5067 static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
5069 struct rbd_spec *parent_spec;
5071 void *reply_buf = NULL;
5081 parent_spec = rbd_spec_alloc();
5085 size = sizeof (__le64) + /* pool_id */
5086 sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX + /* image_id */
5087 sizeof (__le64) + /* snap_id */
5088 sizeof (__le64); /* overlap */
5089 reply_buf = kmalloc(size, GFP_KERNEL);
5095 snapid = cpu_to_le64(rbd_dev->spec->snap_id);
5096 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
5097 "rbd", "get_parent",
5098 &snapid, sizeof (snapid),
5100 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5105 end = reply_buf + ret;
5107 ceph_decode_64_safe(&p, end, pool_id, out_err);
5108 if (pool_id == CEPH_NOPOOL) {
5110 * Either the parent never existed, or we have a
5111 * record of it but the image got flattened, so it no
5112 * longer has a parent. When the parent of a
5113 * layered image disappears we immediately set the
5114 * overlap to 0. The effect of this is that all new
5115 * requests will be treated as if the image had no
5118 if (rbd_dev->parent_overlap) {
5119 rbd_dev->parent_overlap = 0;
5120 rbd_dev_parent_put(rbd_dev);
5121 pr_info("%s: clone image has been flattened\n",
5122 rbd_dev->disk->disk_name);
5125 goto out; /* No parent? No problem. */
5128 /* The ceph file layout needs to fit pool id in 32 bits */
5131 if (pool_id > (u64)U32_MAX) {
5132 rbd_warn(NULL, "parent pool id too large (%llu > %u)",
5133 (unsigned long long)pool_id, U32_MAX);
5137 image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
5138 if (IS_ERR(image_id)) {
5139 ret = PTR_ERR(image_id);
5142 ceph_decode_64_safe(&p, end, snap_id, out_err);
5143 ceph_decode_64_safe(&p, end, overlap, out_err);
5146 * The parent won't change (except when the clone is
5147 * flattened, which was already handled above). So we only
5148 * need to record the parent spec if we have not already done so.
5150 if (!rbd_dev->parent_spec) {
5151 parent_spec->pool_id = pool_id;
5152 parent_spec->image_id = image_id;
5153 parent_spec->snap_id = snap_id;
5154 rbd_dev->parent_spec = parent_spec;
5155 parent_spec = NULL; /* rbd_dev now owns this */
5161 * We always update the parent overlap. If it's zero we issue
5162 * a warning, as we will proceed as if there were no parent.
5166 /* refresh, careful to warn just once */
5167 if (rbd_dev->parent_overlap)
5169 "clone now standalone (overlap became 0)");
5172 rbd_warn(rbd_dev, "clone is standalone (overlap 0)");
5175 rbd_dev->parent_overlap = overlap;
5181 rbd_spec_put(parent_spec);
5186 static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
5190 __le64 stripe_count;
5191 } __attribute__ ((packed)) striping_info_buf = { 0 };
5192 size_t size = sizeof (striping_info_buf);
5199 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
5200 "rbd", "get_stripe_unit_count", NULL, 0,
5201 (char *)&striping_info_buf, size);
5202 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5209 * We don't actually support the "fancy striping" feature
5210 * (STRIPINGV2) yet, but if the striping sizes are the
5211 * defaults the behavior is the same as before. So find
5212 * out, and only fail if the image has non-default values.
5215 obj_size = (u64)1 << rbd_dev->header.obj_order;
5216 p = &striping_info_buf;
5217 stripe_unit = ceph_decode_64(&p);
5218 if (stripe_unit != obj_size) {
5219 rbd_warn(rbd_dev, "unsupported stripe unit "
5220 "(got %llu want %llu)",
5221 stripe_unit, obj_size);
5224 stripe_count = ceph_decode_64(&p);
5225 if (stripe_count != 1) {
5226 rbd_warn(rbd_dev, "unsupported stripe count "
5227 "(got %llu want 1)", stripe_count);
5230 rbd_dev->header.stripe_unit = stripe_unit;
5231 rbd_dev->header.stripe_count = stripe_count;
5236 static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
5238 size_t image_id_size;
5243 void *reply_buf = NULL;
5245 char *image_name = NULL;
5248 rbd_assert(!rbd_dev->spec->image_name);
5250 len = strlen(rbd_dev->spec->image_id);
5251 image_id_size = sizeof (__le32) + len;
5252 image_id = kmalloc(image_id_size, GFP_KERNEL);
5257 end = image_id + image_id_size;
5258 ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);
5260 size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
5261 reply_buf = kmalloc(size, GFP_KERNEL);
5265 ret = rbd_obj_method_sync(rbd_dev, RBD_DIRECTORY,
5266 "rbd", "dir_get_name",
5267 image_id, image_id_size,
5272 end = reply_buf + ret;
5274 image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
5275 if (IS_ERR(image_name))
5278 dout("%s: name is %s len is %zd\n", __func__, image_name, len);
5286 static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
5288 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
5289 const char *snap_name;
5292 /* Skip over names until we find the one we are looking for */
5294 snap_name = rbd_dev->header.snap_names;
5295 while (which < snapc->num_snaps) {
5296 if (!strcmp(name, snap_name))
5297 return snapc->snaps[which];
5298 snap_name += strlen(snap_name) + 1;
5304 static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
5306 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
5311 for (which = 0; !found && which < snapc->num_snaps; which++) {
5312 const char *snap_name;
5314 snap_id = snapc->snaps[which];
5315 snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
5316 if (IS_ERR(snap_name)) {
5317 /* ignore no-longer existing snapshots */
5318 if (PTR_ERR(snap_name) == -ENOENT)
5323 found = !strcmp(name, snap_name);
5326 return found ? snap_id : CEPH_NOSNAP;
5330 * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if
5331 * no snapshot by that name is found, or if an error occurs.
5333 static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
5335 if (rbd_dev->image_format == 1)
5336 return rbd_v1_snap_id_by_name(rbd_dev, name);
5338 return rbd_v2_snap_id_by_name(rbd_dev, name);
5342 * An image being mapped will have everything but the snap id.
5344 static int rbd_spec_fill_snap_id(struct rbd_device *rbd_dev)
5346 struct rbd_spec *spec = rbd_dev->spec;
5348 rbd_assert(spec->pool_id != CEPH_NOPOOL && spec->pool_name);
5349 rbd_assert(spec->image_id && spec->image_name);
5350 rbd_assert(spec->snap_name);
5352 if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
5355 snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
5356 if (snap_id == CEPH_NOSNAP)
5359 spec->snap_id = snap_id;
5361 spec->snap_id = CEPH_NOSNAP;
5368 * A parent image will have all ids but none of the names.
5370 * All names in an rbd spec are dynamically allocated. It's OK if we
5371 * can't figure out the name for an image id.
5373 static int rbd_spec_fill_names(struct rbd_device *rbd_dev)
5375 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
5376 struct rbd_spec *spec = rbd_dev->spec;
5377 const char *pool_name;
5378 const char *image_name;
5379 const char *snap_name;
5382 rbd_assert(spec->pool_id != CEPH_NOPOOL);
5383 rbd_assert(spec->image_id);
5384 rbd_assert(spec->snap_id != CEPH_NOSNAP);
5386 /* Get the pool name; we have to make our own copy of this */
5388 pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
5390 rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
5393 pool_name = kstrdup(pool_name, GFP_KERNEL);
5397 /* Fetch the image name; tolerate failure here */
5399 image_name = rbd_dev_image_name(rbd_dev);
5401 rbd_warn(rbd_dev, "unable to get image name");
5403 /* Fetch the snapshot name */
5405 snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
5406 if (IS_ERR(snap_name)) {
5407 ret = PTR_ERR(snap_name);
5411 spec->pool_name = pool_name;
5412 spec->image_name = image_name;
5413 spec->snap_name = snap_name;
5423 static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
5432 struct ceph_snap_context *snapc;
5436 * We'll need room for the seq value (maximum snapshot id),
5437 * snapshot count, and array of that many snapshot ids.
5438 * For now we have a fixed upper limit on the number we're
5439 * prepared to receive.
5441 size = sizeof (__le64) + sizeof (__le32) +
5442 RBD_MAX_SNAP_COUNT * sizeof (__le64);
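/*
 * With RBD_MAX_SNAP_COUNT == 510 this works out to
 * 8 + 4 + 510 * 8 = 4092 bytes, so the reply buffer fits in a
 * single 4 KiB page.
 */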
5443 reply_buf = kzalloc(size, GFP_KERNEL);
5447 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
5448 "rbd", "get_snapcontext", NULL, 0,
5450 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5455 end = reply_buf + ret;
5457 ceph_decode_64_safe(&p, end, seq, out);
5458 ceph_decode_32_safe(&p, end, snap_count, out);
5461 * Make sure the reported number of snapshot ids wouldn't go
5462 * beyond the end of our buffer. But before checking that,
5463 * make sure the computed size of the snapshot context we
5464 * allocate is representable in a size_t.
5466 if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
5467 / sizeof (u64))
5471 if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
5475 snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
5481 for (i = 0; i < snap_count; i++)
5482 snapc->snaps[i] = ceph_decode_64(&p);
5484 ceph_put_snap_context(rbd_dev->header.snapc);
5485 rbd_dev->header.snapc = snapc;
5487 dout(" snap context seq = %llu, snap_count = %u\n",
5488 (unsigned long long)seq, (unsigned int)snap_count);
5495 static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
5506 size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
5507 reply_buf = kmalloc(size, GFP_KERNEL);
5509 return ERR_PTR(-ENOMEM);
5511 snapid = cpu_to_le64(snap_id);
5512 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
5513 "rbd", "get_snapshot_name",
5514 &snapid, sizeof (snapid),
5516 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5518 snap_name = ERR_PTR(ret);
5523 end = reply_buf + ret;
5524 snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
5525 if (IS_ERR(snap_name))
5528 dout(" snap_id 0x%016llx snap_name = %s\n",
5529 (unsigned long long)snap_id, snap_name);
5536 static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
5538 bool first_time = rbd_dev->header.object_prefix == NULL;
5541 ret = rbd_dev_v2_image_size(rbd_dev);
5546 ret = rbd_dev_v2_header_onetime(rbd_dev);
5551 ret = rbd_dev_v2_snap_context(rbd_dev);
5552 if (ret && first_time) {
5553 kfree(rbd_dev->header.object_prefix);
5554 rbd_dev->header.object_prefix = NULL;
5560 static int rbd_dev_header_info(struct rbd_device *rbd_dev)
5562 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
5564 if (rbd_dev->image_format == 1)
5565 return rbd_dev_v1_header_info(rbd_dev);
5567 return rbd_dev_v2_header_info(rbd_dev);
5571 * Skips over white space at *buf, and updates *buf to point to the
5572 * first found non-space character (if any). Returns the length of
5573 * the token (string of non-white space characters) found. Note
5574 * that *buf must be terminated with '\0'.
5576 static inline size_t next_token(const char **buf)
5579 * These are the characters that produce nonzero for
5580 * isspace() in the "C" and "POSIX" locales.
5582 const char *spaces = " \f\n\r\t\v";
5584 *buf += strspn(*buf, spaces); /* Find start of token */
5586 return strcspn(*buf, spaces); /* Return token length */
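/*
 * Example: with *buf pointing at "  rbd foo", next_token(&buf)
 * advances *buf to "rbd foo" and returns 3.  The token is not
 * NUL-terminated in place, which is why callers needing a copy
 * use dup_token() below.
 */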
5590 * Finds the next token in *buf, dynamically allocates a buffer big
5591 * enough to hold a copy of it, and copies the token into the new
5592 * buffer. The copy is guaranteed to be terminated with '\0'. Note
5593 * that a duplicate buffer is created even for a zero-length token.
5595 * Returns a pointer to the newly-allocated duplicate, or a null
5596 * pointer if memory for the duplicate was not available. If
5597 * the lenp argument is a non-null pointer, the length of the token
5598 * (not including the '\0') is returned in *lenp.
5600 * If successful, the *buf pointer will be updated to point beyond
5601 * the end of the found token.
5603 * Note: uses GFP_KERNEL for allocation.
5605 static inline char *dup_token(const char **buf, size_t *lenp)
5610 len = next_token(buf);
5611 dup = kmemdup(*buf, len + 1, GFP_KERNEL);
5614 *(dup + len) = '\0';
5624 * Parse the options provided for an "rbd add" (i.e., rbd image
5625 * mapping) request. These arrive via a write to /sys/bus/rbd/add,
5626 * and the data written is passed here via a NUL-terminated buffer.
5627 * Returns 0 if successful or an error code otherwise.
5629 * The information extracted from these options is recorded in
5630 * the other parameters, which return dynamically-allocated structures:
5633 * ceph_opts: The address of a pointer that will refer to a ceph options
5634 * structure. Caller must release the returned pointer using
5635 * ceph_destroy_options() when it is no longer needed.
5637 * opts: Address of an rbd options pointer. Fully initialized by
5638 * this function; caller must release with kfree().
5640 * rbd_spec: Address of an rbd image specification pointer. Fully
5641 * initialized by this function based on parsed options.
5642 * Caller must release with rbd_spec_put().
5644 * The options passed take this form:
5645 * <mon_addrs> <options> <pool_name> <image_name> [<snap_name>]
5648 * <mon_addrs>: A comma-separated list of one or more monitor addresses.
5649 * A monitor address is an ip address, optionally followed
5650 * by a port number (separated by a colon).
5651 * I.e.: ip1[:port1][,ip2[:port2]...]
5653 * <options>: A comma-separated list of ceph and/or rbd options.
5655 * <pool_name>: The name of the rados pool containing the rbd image.
5657 * <image_name>: The name of the image in that pool to map.
5659 * <snap_name>: An optional snapshot name. If provided, the mapping will
5660 * present data from the image at the time that snapshot was
5661 * created. The image head is used if no snapshot name is
5662 * provided. Snapshot mappings are always read-only.
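/*
 * A hypothetical example (all values invented):
 *
 *	$ echo "1.2.3.4:6789 name=admin,secret=XYZ mypool myimage mysnap" \
 *		> /sys/bus/rbd/add
 *
 * maps snapshot "mysnap" of image "myimage" in pool "mypool" using
 * the monitor at 1.2.3.4:6789; omitting "mysnap" maps the image head.
 */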
5664 static int rbd_add_parse_args(const char *buf,
5665 struct ceph_options **ceph_opts,
5666 struct rbd_options **opts,
5667 struct rbd_spec **rbd_spec)
5671 const char *mon_addrs;
5673 size_t mon_addrs_size;
5674 struct rbd_spec *spec = NULL;
5675 struct rbd_options *rbd_opts = NULL;
5676 struct ceph_options *copts;
5679 /* The first four tokens are required */
5681 len = next_token(&buf);
5683 rbd_warn(NULL, "no monitor address(es) provided");
5687 mon_addrs_size = len + 1;
5691 options = dup_token(&buf, NULL);
5695 rbd_warn(NULL, "no options provided");
5699 spec = rbd_spec_alloc();
5703 spec->pool_name = dup_token(&buf, NULL);
5704 if (!spec->pool_name)
5706 if (!*spec->pool_name) {
5707 rbd_warn(NULL, "no pool name provided");
5711 spec->image_name = dup_token(&buf, NULL);
5712 if (!spec->image_name)
5714 if (!*spec->image_name) {
5715 rbd_warn(NULL, "no image name provided");
5720 * Snapshot name is optional; default is to use "-"
5721 * (indicating the head/no snapshot).
5723 len = next_token(&buf);
5725 buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
5726 len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
5727 } else if (len > RBD_MAX_SNAP_NAME_LEN) {
5728 ret = -ENAMETOOLONG;
5731 snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
5734 *(snap_name + len) = '\0';
5735 spec->snap_name = snap_name;
5737 /* Initialize all rbd options to the defaults */
5739 rbd_opts = kzalloc(sizeof (*rbd_opts), GFP_KERNEL);
5743 rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;
5744 rbd_opts->queue_depth = RBD_QUEUE_DEPTH_DEFAULT;
5745 rbd_opts->lock_on_read = RBD_LOCK_ON_READ_DEFAULT;
5747 copts = ceph_parse_options(options, mon_addrs,
5748 mon_addrs + mon_addrs_size - 1,
5749 parse_rbd_opts_token, rbd_opts);
5750 if (IS_ERR(copts)) {
5751 ret = PTR_ERR(copts);
5772 * Return pool id (>= 0) or a negative error code.
5774 static int rbd_add_get_pool_id(struct rbd_client *rbdc, const char *pool_name)
5776 struct ceph_options *opts = rbdc->client->options;
5782 ret = ceph_pg_poolid_by_name(rbdc->client->osdc.osdmap, pool_name);
5783 if (ret == -ENOENT && tries++ < 1) {
5784 ret = ceph_monc_get_version(&rbdc->client->monc, "osdmap",
5789 if (rbdc->client->osdc.osdmap->epoch < newest_epoch) {
5790 ceph_osdc_maybe_request_map(&rbdc->client->osdc);
5791 (void) ceph_monc_wait_osdmap(&rbdc->client->monc,
5793 opts->mount_timeout);
5796 /* the osdmap we have is new enough */
5805 * An rbd format 2 image has a unique identifier, distinct from the
5806 * name given to it by the user. Internally, that identifier is
5807 * what's used to specify the names of objects related to the image.
5809 * A special "rbd id" object is used to map an rbd image name to its
5810 * id. If that object doesn't exist, then there is no v2 rbd image
5811 * with the supplied name.
5813 * This function will record the given rbd_dev's image_id field if
5814 * it can be determined, and in that case will return 0. If any
5815 * errors occur a negative errno will be returned and the rbd_dev's
5816 * image_id field will be unchanged (and should be NULL).
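/*
 * For example (id value invented): the id of a format 2 image named
 * "foo" is stored in an object named "rbd_id.foo" (RBD_ID_PREFIX
 * followed by the image name), and is returned by that object's
 * "get_id" class method as a length-prefixed string such as
 * "10074b0dc51".
 */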
5818 static int rbd_dev_image_id(struct rbd_device *rbd_dev)
5827 * When probing a parent image, the image id is already
5828 * known (and the image name likely is not). There's no
5829 * need to fetch the image id again in this case. We
5830 * do still need to set the image format though.
5832 if (rbd_dev->spec->image_id) {
5833 rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;
5839 * First, see if the format 2 image id file exists, and if
5840 * so, get the image's persistent id from it.
5842 size = sizeof (RBD_ID_PREFIX) + strlen(rbd_dev->spec->image_name);
5843 object_name = kmalloc(size, GFP_NOIO);
5846 sprintf(object_name, "%s%s", RBD_ID_PREFIX, rbd_dev->spec->image_name);
5847 dout("rbd id object name is %s\n", object_name);
5849 /* Response will be an encoded string, which includes a length */
5851 size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
5852 response = kzalloc(size, GFP_NOIO);
5858 /* If it doesn't exist we'll assume it's a format 1 image */
5860 ret = rbd_obj_method_sync(rbd_dev, object_name,
5861 "rbd", "get_id", NULL, 0,
5862 response, RBD_IMAGE_ID_LEN_MAX);
5863 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5864 if (ret == -ENOENT) {
5865 image_id = kstrdup("", GFP_KERNEL);
5866 ret = image_id ? 0 : -ENOMEM;
5868 rbd_dev->image_format = 1;
5869 } else if (ret >= 0) {
5872 image_id = ceph_extract_encoded_string(&p, p + ret, NULL, GFP_NOIO);
5874 ret = PTR_ERR_OR_ZERO(image_id);
5876 rbd_dev->image_format = 2;
5880 rbd_dev->spec->image_id = image_id;
5881 dout("image_id is %s\n", image_id);
5891 * Undo whatever state changes are made by v1 or v2 header info calls.
5894 static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
5896 struct rbd_image_header *header;
5898 rbd_dev_parent_put(rbd_dev);
5900 /* Free dynamic fields from the header, then zero it out */
5902 header = &rbd_dev->header;
5903 ceph_put_snap_context(header->snapc);
5904 kfree(header->snap_sizes);
5905 kfree(header->snap_names);
5906 kfree(header->object_prefix);
5907 memset(header, 0, sizeof (*header));
5910 static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
5914 ret = rbd_dev_v2_object_prefix(rbd_dev);
5919 * Get and check the features for the image. Currently the
5920 * features are assumed to never change.
5922 ret = rbd_dev_v2_features(rbd_dev);
5926 /* If the image supports fancy striping, get its parameters */
5928 if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
5929 ret = rbd_dev_v2_striping_info(rbd_dev);
5933 /* No support for format 2 images with crypto or compression types */
5937 rbd_dev->header.features = 0;
5938 kfree(rbd_dev->header.object_prefix);
5939 rbd_dev->header.object_prefix = NULL;
5945 * @depth is rbd_dev_image_probe() -> rbd_dev_probe_parent() ->
5946 * rbd_dev_image_probe() recursion depth, which means it's also the
5947 * length of the already discovered part of the parent chain.
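/*
 * For example, mapping a clone of a clone yields the chain
 * image -> parent -> grandparent: the image is probed at depth 0,
 * its parent at depth 1, the grandparent at depth 2, and probing
 * fails once the depth would exceed RBD_MAX_PARENT_CHAIN_LEN.
 */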
5949 static int rbd_dev_probe_parent(struct rbd_device *rbd_dev, int depth)
5951 struct rbd_device *parent = NULL;
5954 if (!rbd_dev->parent_spec)
5957 if (++depth > RBD_MAX_PARENT_CHAIN_LEN) {
5958 pr_info("parent chain is too long (%d)\n", depth);
5963 parent = __rbd_dev_create(rbd_dev->rbd_client, rbd_dev->parent_spec);
5970 * Images related by parent/child relationships always share
5971 * rbd_client and spec/parent_spec, so bump their refcounts.
5973 __rbd_get_client(rbd_dev->rbd_client);
5974 rbd_spec_get(rbd_dev->parent_spec);
5976 ret = rbd_dev_image_probe(parent, depth);
5980 rbd_dev->parent = parent;
5981 atomic_set(&rbd_dev->parent_ref, 1);
5985 rbd_dev_unparent(rbd_dev);
5986 rbd_dev_destroy(parent);
5991 * rbd_dev->header_rwsem must be locked for write and will be unlocked
5992 * upon return.
5994 static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
5998 /* Record our major and minor device numbers. */
6000 if (!single_major) {
6001 ret = register_blkdev(0, rbd_dev->name);
6003 goto err_out_unlock;
6005 rbd_dev->major = ret;
6008 rbd_dev->major = rbd_major;
6009 rbd_dev->minor = rbd_dev_id_to_minor(rbd_dev->dev_id);
6012 /* Set up the blkdev mapping. */
6014 ret = rbd_init_disk(rbd_dev);
6016 goto err_out_blkdev;
6018 ret = rbd_dev_mapping_set(rbd_dev);
6022 set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
6023 set_disk_ro(rbd_dev->disk, rbd_dev->mapping.read_only);
6025 dev_set_name(&rbd_dev->dev, "%d", rbd_dev->dev_id);
6026 ret = device_add(&rbd_dev->dev);
6028 goto err_out_mapping;
6030 /* Everything's ready. Announce the disk to the world. */
6032 set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
6033 up_write(&rbd_dev->header_rwsem);
6035 spin_lock(&rbd_dev_list_lock);
6036 list_add_tail(&rbd_dev->node, &rbd_dev_list);
6037 spin_unlock(&rbd_dev_list_lock);
6039 add_disk(rbd_dev->disk);
6040 pr_info("%s: capacity %llu features 0x%llx\n", rbd_dev->disk->disk_name,
6041 (unsigned long long)get_capacity(rbd_dev->disk) << SECTOR_SHIFT,
6042 rbd_dev->header.features);
6047 rbd_dev_mapping_clear(rbd_dev);
6049 rbd_free_disk(rbd_dev);
6052 unregister_blkdev(rbd_dev->major, rbd_dev->name);
6054 up_write(&rbd_dev->header_rwsem);
6058 static int rbd_dev_header_name(struct rbd_device *rbd_dev)
6060 struct rbd_spec *spec = rbd_dev->spec;
6063 /* Record the header object name for this rbd image. */
6065 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
6067 rbd_dev->header_oloc.pool = rbd_dev->layout.pool_id;
6068 if (rbd_dev->image_format == 1)
6069 ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
6070 spec->image_name, RBD_SUFFIX);
6072 ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
6073 RBD_HEADER_PREFIX, spec->image_id);
6078 static void rbd_dev_image_release(struct rbd_device *rbd_dev)
6080 rbd_dev_unprobe(rbd_dev);
6081 rbd_dev->image_format = 0;
6082 kfree(rbd_dev->spec->image_id);
6083 rbd_dev->spec->image_id = NULL;
6085 rbd_dev_destroy(rbd_dev);
6089 * Probe for the existence of the header object for the given rbd
6090 * device. If this image is the one being mapped (i.e., not a
6091 * parent), initiate a watch on its header object before using that
6092 * object to get detailed information about the rbd image.
6094 static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
6099 * Get the id from the image id object. Unless there's an
6100 * error, rbd_dev->spec->image_id will be filled in with
6101 * a dynamically-allocated string, and rbd_dev->image_format
6102 * will be set to either 1 or 2.
6104 ret = rbd_dev_image_id(rbd_dev);
6108 ret = rbd_dev_header_name(rbd_dev);
6110 goto err_out_format;
6113 ret = rbd_register_watch(rbd_dev);
6116 pr_info("image %s/%s does not exist\n",
6117 rbd_dev->spec->pool_name,
6118 rbd_dev->spec->image_name);
6119 goto err_out_format;
6123 ret = rbd_dev_header_info(rbd_dev);
6128 * If this image is the one being mapped, we have pool name and
6129 * id, image name and id, and snap name - need to fill snap id.
6130 * Otherwise this is a parent image, identified by pool, image
6131 * and snap ids - need to fill in names for those ids.
6134 ret = rbd_spec_fill_snap_id(rbd_dev);
6136 ret = rbd_spec_fill_names(rbd_dev);
6139 pr_info("snap %s/%s@%s does not exist\n",
6140 rbd_dev->spec->pool_name,
6141 rbd_dev->spec->image_name,
6142 rbd_dev->spec->snap_name);
6146 if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
6147 ret = rbd_dev_v2_parent_info(rbd_dev);
6152 * Need to warn users if this image is the one being
6153 * mapped and has a parent.
6155 if (!depth && rbd_dev->parent_spec)
6157 "WARNING: kernel layering is EXPERIMENTAL!");
6160 ret = rbd_dev_probe_parent(rbd_dev, depth);
6164 dout("discovered format %u image, header name is %s\n",
6165 rbd_dev->image_format, rbd_dev->header_oid.name);
6169 rbd_dev_unprobe(rbd_dev);
6172 rbd_unregister_watch(rbd_dev);
6174 rbd_dev->image_format = 0;
6175 kfree(rbd_dev->spec->image_id);
6176 rbd_dev->spec->image_id = NULL;
6180 static ssize_t do_rbd_add(struct bus_type *bus,
6184 struct rbd_device *rbd_dev = NULL;
6185 struct ceph_options *ceph_opts = NULL;
6186 struct rbd_options *rbd_opts = NULL;
6187 struct rbd_spec *spec = NULL;
6188 struct rbd_client *rbdc;
6192 if (!try_module_get(THIS_MODULE))
6195 /* parse add command */
6196 rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
6200 rbdc = rbd_get_client(ceph_opts);
6207 rc = rbd_add_get_pool_id(rbdc, spec->pool_name);
6210 pr_info("pool %s does not exist\n", spec->pool_name);
6211 goto err_out_client;
6213 spec->pool_id = (u64)rc;
6215 rbd_dev = rbd_dev_create(rbdc, spec, rbd_opts);
6218 goto err_out_client;
6220 rbdc = NULL; /* rbd_dev now owns this */
6221 spec = NULL; /* rbd_dev now owns this */
6222 rbd_opts = NULL; /* rbd_dev now owns this */
6224 rbd_dev->config_info = kstrdup(buf, GFP_KERNEL);
6225 if (!rbd_dev->config_info) {
6227 goto err_out_rbd_dev;
6230 down_write(&rbd_dev->header_rwsem);
6231 rc = rbd_dev_image_probe(rbd_dev, 0);
6233 up_write(&rbd_dev->header_rwsem);
6234 goto err_out_rbd_dev;
6237 /* If we are mapping a snapshot it must be marked read-only */
6239 read_only = rbd_dev->opts->read_only;
6240 if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
6242 rbd_dev->mapping.read_only = read_only;
6244 rc = rbd_dev_device_setup(rbd_dev);
6247 * rbd_unregister_watch() can't be moved into
6248 * rbd_dev_image_release() without refactoring, see
6249 * commit 1f3ef78861ac.
6251 rbd_unregister_watch(rbd_dev);
6252 rbd_dev_image_release(rbd_dev);
6258 module_put(THIS_MODULE);
6262 rbd_dev_destroy(rbd_dev);
6264 rbd_put_client(rbdc);
6271 static ssize_t rbd_add(struct bus_type *bus,
6278 return do_rbd_add(bus, buf, count);
6281 static ssize_t rbd_add_single_major(struct bus_type *bus,
6285 return do_rbd_add(bus, buf, count);
6288 static void rbd_dev_device_release(struct rbd_device *rbd_dev)
6290 rbd_free_disk(rbd_dev);
6292 spin_lock(&rbd_dev_list_lock);
6293 list_del_init(&rbd_dev->node);
6294 spin_unlock(&rbd_dev_list_lock);
6296 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
6297 device_del(&rbd_dev->dev);
6298 rbd_dev_mapping_clear(rbd_dev);
6300 unregister_blkdev(rbd_dev->major, rbd_dev->name);
6303 static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
6305 while (rbd_dev->parent) {
6306 struct rbd_device *first = rbd_dev;
6307 struct rbd_device *second = first->parent;
6308 struct rbd_device *third;
6311 * Follow to the parent with no grandparent and remove it.
6314 while (second && (third = second->parent)) {
6319 rbd_dev_image_release(second);
6320 first->parent = NULL;
6321 first->parent_overlap = 0;
6323 rbd_assert(first->parent_spec);
6324 rbd_spec_put(first->parent_spec);
6325 first->parent_spec = NULL;
6329 static ssize_t do_rbd_remove(struct bus_type *bus,
6333 struct rbd_device *rbd_dev = NULL;
6334 struct list_head *tmp;
6337 bool already = false;
6343 sscanf(buf, "%d %5s", &dev_id, opt_buf);
6345 pr_err("dev_id out of range\n");
6348 if (opt_buf[0] != '\0') {
6349 if (!strcmp(opt_buf, "force")) {
6352 pr_err("bad remove option at '%s'\n", opt_buf);
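/*
 * For example (device id invented): writing "2" to
 * /sys/bus/rbd/remove unmaps rbd2 provided it is not open;
 * writing "2 force" unmaps it even while open, failing any
 * outstanding I/O.
 */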
6358 spin_lock(&rbd_dev_list_lock);
6359 list_for_each(tmp, &rbd_dev_list) {
6360 rbd_dev = list_entry(tmp, struct rbd_device, node);
6361 if (rbd_dev->dev_id == dev_id) {
6367 spin_lock_irq(&rbd_dev->lock);
6368 if (rbd_dev->open_count && !force)
6371 already = test_and_set_bit(RBD_DEV_FLAG_REMOVING,
6373 spin_unlock_irq(&rbd_dev->lock);
6375 spin_unlock(&rbd_dev_list_lock);
6376 if (ret < 0 || already)
6381 * Prevent new IO from being queued and wait for existing
6382 * IO to complete/fail.
6384 blk_mq_freeze_queue(rbd_dev->disk->queue);
6385 blk_set_queue_dying(rbd_dev->disk->queue);
6388 down_write(&rbd_dev->lock_rwsem);
6389 if (__rbd_is_lock_owner(rbd_dev))
6390 rbd_unlock(rbd_dev);
6391 up_write(&rbd_dev->lock_rwsem);
6392 rbd_unregister_watch(rbd_dev);
6395 * Don't free anything from rbd_dev->disk until after all
6396 * notifies are completely processed. Otherwise
6397 * rbd_bus_del_dev() will race with rbd_watch_cb(), resulting
6398 * in a potential use after free of rbd_dev->disk or rbd_dev.
6400 rbd_dev_device_release(rbd_dev);
6401 rbd_dev_image_release(rbd_dev);
6406 static ssize_t rbd_remove(struct bus_type *bus,
6413 return do_rbd_remove(bus, buf, count);
6416 static ssize_t rbd_remove_single_major(struct bus_type *bus,
6420 return do_rbd_remove(bus, buf, count);
6424 * create control files in sysfs
6427 static int rbd_sysfs_init(void)
6431 ret = device_register(&rbd_root_dev);
6435 ret = bus_register(&rbd_bus_type);
6437 device_unregister(&rbd_root_dev);
6442 static void rbd_sysfs_cleanup(void)
6444 bus_unregister(&rbd_bus_type);
6445 device_unregister(&rbd_root_dev);
6448 static int rbd_slab_init(void)
6450 rbd_assert(!rbd_img_request_cache);
6451 rbd_img_request_cache = KMEM_CACHE(rbd_img_request, 0);
6452 if (!rbd_img_request_cache)
6455 rbd_assert(!rbd_obj_request_cache);
6456 rbd_obj_request_cache = KMEM_CACHE(rbd_obj_request, 0);
6457 if (!rbd_obj_request_cache)
6460 rbd_assert(!rbd_segment_name_cache);
6461 rbd_segment_name_cache = kmem_cache_create("rbd_segment_name",
6462 CEPH_MAX_OID_NAME_LEN + 1, 1, 0, NULL);
6463 if (rbd_segment_name_cache)
6466 kmem_cache_destroy(rbd_obj_request_cache);
6467 rbd_obj_request_cache = NULL;
6469 kmem_cache_destroy(rbd_img_request_cache);
6470 rbd_img_request_cache = NULL;
6475 static void rbd_slab_exit(void)
6477 rbd_assert(rbd_segment_name_cache);
6478 kmem_cache_destroy(rbd_segment_name_cache);
6479 rbd_segment_name_cache = NULL;
6481 rbd_assert(rbd_obj_request_cache);
6482 kmem_cache_destroy(rbd_obj_request_cache);
6483 rbd_obj_request_cache = NULL;
6485 rbd_assert(rbd_img_request_cache);
6486 kmem_cache_destroy(rbd_img_request_cache);
6487 rbd_img_request_cache = NULL;
6490 static int __init rbd_init(void)
6494 if (!libceph_compatible(NULL)) {
6495 rbd_warn(NULL, "libceph incompatibility (quitting)");
6499 rc = rbd_slab_init();
6504 * The number of active work items is limited by the number of
6505 * rbd devices * queue depth, so leave @max_active at default.
6507 rbd_wq = alloc_workqueue(RBD_DRV_NAME, WQ_MEM_RECLAIM, 0);
6514 rbd_major = register_blkdev(0, RBD_DRV_NAME);
6515 if (rbd_major < 0) {
6521 rc = rbd_sysfs_init();
6523 goto err_out_blkdev;
6526 pr_info("loaded (major %d)\n", rbd_major);
6528 pr_info("loaded\n");
6534 unregister_blkdev(rbd_major, RBD_DRV_NAME);
6536 destroy_workqueue(rbd_wq);
6542 static void __exit rbd_exit(void)
6544 ida_destroy(&rbd_dev_id_ida);
6545 rbd_sysfs_cleanup();
6547 unregister_blkdev(rbd_major, RBD_DRV_NAME);
6548 destroy_workqueue(rbd_wq);
6552 module_init(rbd_init);
6553 module_exit(rbd_exit);
6555 MODULE_AUTHOR("Alex Elder <elder@inktank.com>");
6556 MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
6557 MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
6558 /* following authorship retained from original osdblk.c */
6559 MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");
6561 MODULE_DESCRIPTION("RADOS Block Device (RBD) driver");
6562 MODULE_LICENSE("GPL");