/*
   rbd.c -- Export ceph rados objects as a Linux block device

   based on drivers/block/osdblk.c:

   Copyright 2009 Red Hat, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

   For usage instructions, please refer to:

		 Documentation/ABI/testing/sysfs-bus-rbd

 */
#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/cls_lock_client.h>
#include <linux/ceph/decode.h>
#include <linux/parser.h>
#include <linux/bsearch.h>

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/blk-mq.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/workqueue.h>

#include "rbd_types.h"
#define RBD_DEBUG	/* Activate rbd_assert() calls */
/*
 * The basic unit of block I/O is a sector.  It is interpreted in a
 * number of contexts in Linux (blk, bio, genhd), but the default is
 * universally 512 bytes.  These symbols are just slightly more
 * meaningful than the bare numbers they represent.
 */
#define	SECTOR_SHIFT	9
#define	SECTOR_SIZE	(1ULL << SECTOR_SHIFT)
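
/*
 * Usage sketch (illustrative only, not part of the driver): the block
 * layer deals in 512-byte sectors, so byte counts cross this boundary
 * via SECTOR_SHIFT.  For example, a 1 GiB mapping is
 * 1073741824 >> SECTOR_SHIFT == 2097152 sectors, which is the kind of
 * value set_capacity() is handed.
 */
static inline u64 rbd_example_bytes_to_sectors(u64 bytes)
{
	return bytes >> SECTOR_SHIFT;
}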
/*
 * Increment the given counter and return its updated value.
 * If the counter is already 0 it will not be incremented.
 * If the counter is already at its maximum value, return
 * -EINVAL without updating it.
 */
static int atomic_inc_return_safe(atomic_t *v)
{
	unsigned int counter;

	counter = (unsigned int)__atomic_add_unless(v, 1, 0);
	if (counter <= (unsigned int)INT_MAX)
		return (int)counter;

	atomic_dec(v);
	return -EINVAL;
}

/* Decrement the counter.  Return the resulting value, or -EINVAL */
static int atomic_dec_return_safe(atomic_t *v)
{
	int counter;

	counter = atomic_dec_return(v);
	if (counter >= 0)
		return counter;

	atomic_inc(v);
	return -EINVAL;
}
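
/*
 * Usage sketch (illustrative only, not part of the driver): together
 * these helpers implement a saturating reference count.  A get that
 * would overflow fails instead of wrapping, and a put that would go
 * negative restores the counter and reports -EINVAL (underflow).
 * This is the pattern rbd_dev_parent_get()/rbd_dev_parent_put() use
 * below for parent_ref.
 */
static inline bool rbd_example_ref_get(atomic_t *ref)
{
	return atomic_inc_return_safe(ref) > 0;	/* false on 0 or overflow */
}

static inline void rbd_example_ref_put(atomic_t *ref)
{
	WARN_ON(atomic_dec_return_safe(ref) < 0);	/* -EINVAL = underflow */
}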
#define RBD_DRV_NAME "rbd"

#define RBD_MINORS_PER_MAJOR		256
#define RBD_SINGLE_MAJOR_PART_SHIFT	4

#define RBD_MAX_PARENT_CHAIN_LEN	16

#define RBD_SNAP_DEV_NAME_PREFIX	"snap_"
#define RBD_MAX_SNAP_NAME_LEN	\
			(NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))

#define RBD_MAX_SNAP_COUNT	510	/* allows max snapc to fit in 4KB */

#define RBD_SNAP_HEAD_NAME	"-"

#define	BAD_SNAP_INDEX	U32_MAX		/* invalid index into snap array */

/* This allows a single page to hold an image name sent by OSD */
#define RBD_IMAGE_NAME_LEN_MAX	(PAGE_SIZE - sizeof (__le32) - 1)
#define RBD_IMAGE_ID_LEN_MAX	64

#define RBD_OBJ_PREFIX_LEN_MAX	64

#define RBD_NOTIFY_TIMEOUT	5	/* seconds */
#define RBD_RETRY_DELAY		msecs_to_jiffies(1000)
#define RBD_FEATURE_LAYERING		(1<<0)
#define RBD_FEATURE_STRIPINGV2		(1<<1)
#define RBD_FEATURE_EXCLUSIVE_LOCK	(1<<2)
#define RBD_FEATURES_ALL	(RBD_FEATURE_LAYERING |		\
				 RBD_FEATURE_STRIPINGV2 |	\
				 RBD_FEATURE_EXCLUSIVE_LOCK)

/* Features supported by this (client software) implementation. */

#define RBD_FEATURES_SUPPORTED	(RBD_FEATURES_ALL)
/*
 * An RBD device name will be "rbd#", where the "rbd" comes from
 * RBD_DRV_NAME above, and # is a unique integer identifier.
 */
#define DEV_NAME_LEN		32
/*
 * block device image metadata (in-memory version)
 */
struct rbd_image_header {
	/* These six fields never change for a given rbd image */
	char *object_prefix;
	__u8 obj_order;
	__u8 crypt_type;
	__u8 comp_type;
	u64 stripe_unit;
	u64 stripe_count;
	u64 features;		/* Might be changeable someday? */

	/* The remaining fields need to be updated occasionally */
	u64 image_size;
	struct ceph_snap_context *snapc;
	char *snap_names;	/* format 1 only */
	u64 *snap_sizes;	/* format 1 only */
};
/*
 * An rbd image specification.
 *
 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
 * identify an image.  Each rbd_dev structure includes a pointer to
 * an rbd_spec structure that encapsulates this identity.
 *
 * Each of the id's in an rbd_spec has an associated name.  For a
 * user-mapped image, the names are supplied and the id's associated
 * with them are looked up.  For a layered image, a parent image is
 * defined by the tuple, and the names are looked up.
 *
 * An rbd_dev structure contains a parent_spec pointer which is
 * non-null if the image it represents is a child in a layered
 * image.  This pointer will refer to the rbd_spec structure used
 * by the parent rbd_dev for its own identity (i.e., the structure
 * is shared between the parent and child).
 *
 * Since these structures are populated once, during the discovery
 * phase of image construction, they are effectively immutable so
 * we make no effort to synchronize access to them.
 *
 * Note that code herein does not assume the image name is known (it
 * could be a null pointer).
 */
struct rbd_spec {
	u64		pool_id;
	const char	*pool_name;

	const char	*image_id;
	const char	*image_name;

	u64		snap_id;
	const char	*snap_name;

	struct kref	kref;
};
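
/*
 * Illustrative sketch (hypothetical values, not part of the driver):
 * the (pool_id, image_id, snap_id) tuple is what actually identifies
 * an image; the names are resolved to or from these ids during the
 * discovery phase.
 */
static inline void rbd_example_fill_spec(struct rbd_spec *spec)
{
	spec->pool_id = 2;			/* e.g. pool "rbd" */
	spec->image_id = "101d6b8b4567";	/* looked up from image_name */
	spec->snap_id = CEPH_NOSNAP;		/* head (writable) mapping */
}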
/*
 * an instance of the client.  multiple devices may share an rbd client.
 */
struct rbd_client {
	struct ceph_client	*client;
	struct kref		kref;
	struct list_head	node;
};
struct rbd_img_request;
typedef void (*rbd_img_callback_t)(struct rbd_img_request *);

#define	BAD_WHICH	U32_MAX		/* Good which or bad which, which? */

struct rbd_obj_request;
typedef void (*rbd_obj_callback_t)(struct rbd_obj_request *);

enum obj_request_type {
	OBJ_REQUEST_NODATA, OBJ_REQUEST_BIO, OBJ_REQUEST_PAGES
};

enum obj_operation_type {
	OBJ_OP_WRITE,
	OBJ_OP_READ,
	OBJ_OP_DISCARD,
};

enum obj_req_flags {
	OBJ_REQ_DONE,		/* completion flag: not done = 0, done = 1 */
	OBJ_REQ_IMG_DATA,	/* object usage: standalone = 0, image = 1 */
	OBJ_REQ_KNOWN,		/* EXISTS flag valid: no = 0, yes = 1 */
	OBJ_REQ_EXISTS,		/* target exists: no = 0, yes = 1 */
};
struct rbd_obj_request {
	const char		*object_name;
	u64			offset;		/* object start byte */
	u64			length;		/* bytes from offset */
	unsigned long		flags;

	/*
	 * An object request associated with an image will have its
	 * img_data flag set; a standalone object request will not.
	 *
	 * A standalone object request will have which == BAD_WHICH
	 * and a null obj_request pointer.
	 *
	 * An object request initiated in support of a layered image
	 * object (to check for its existence before a write) will
	 * have which == BAD_WHICH and a non-null obj_request pointer.
	 *
	 * Finally, an object request for rbd image data will have
	 * which != BAD_WHICH, and will have a non-null img_request
	 * pointer.  The value of which will be in the range
	 * 0..(img_request->obj_request_count-1).
	 */
	union {
		struct rbd_obj_request	*obj_request;	/* STAT op */
		struct {
			struct rbd_img_request	*img_request;
			u64			img_offset;
			/* links for img_request->obj_requests list */
			struct list_head	links;
		};
	};
	u32			which;		/* posn image request list */

	enum obj_request_type	type;
	union {
		struct bio	*bio_list;
		struct {
			struct page	**pages;
			u32		page_count;
		};
	};
	struct page		**copyup_pages;
	u32			copyup_page_count;

	struct ceph_osd_request	*osd_req;

	u64			xferred;	/* bytes transferred */
	int			result;

	rbd_obj_callback_t	callback;
	struct completion	completion;

	struct kref		kref;
};
enum img_req_flags {
	IMG_REQ_WRITE,		/* I/O direction: read = 0, write = 1 */
	IMG_REQ_CHILD,		/* initiator: block = 0, child image = 1 */
	IMG_REQ_LAYERED,	/* ENOENT handling: normal = 0, layered = 1 */
	IMG_REQ_DISCARD,	/* discard: normal = 0, discard request = 1 */
};
struct rbd_img_request {
	struct rbd_device	*rbd_dev;
	u64			offset;	/* starting image byte offset */
	u64			length;	/* byte count from offset */
	unsigned long		flags;
	union {
		u64			snap_id;	/* for reads */
		struct ceph_snap_context *snapc;	/* for writes */
	};
	union {
		struct request		*rq;		/* block request */
		struct rbd_obj_request	*obj_request;	/* obj req initiator */
	};
	struct page		**copyup_pages;
	u32			copyup_page_count;
	spinlock_t		completion_lock;/* protects next_completion */
	u32			next_completion;
	rbd_img_callback_t	callback;
	u64			xferred;/* aggregate bytes transferred */
	int			result;	/* first nonzero obj_request result */

	u32			obj_request_count;
	struct list_head	obj_requests;	/* rbd_obj_request structs */

	struct kref		kref;
};
#define for_each_obj_request(ireq, oreq) \
	list_for_each_entry(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_from(ireq, oreq) \
	list_for_each_entry_from(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_safe(ireq, oreq, n) \
	list_for_each_entry_safe_reverse(oreq, n, &(ireq)->obj_requests, links)
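
/*
 * Usage sketch (illustrative only, not part of the driver): walking an
 * image request's object requests with the iterators above, the way
 * rbd_img_request_complete() aggregates per-object transfer counts.
 */
static inline u64 rbd_example_total_xferred(struct rbd_img_request *ireq)
{
	struct rbd_obj_request *oreq;
	u64 total = 0;

	for_each_obj_request(ireq, oreq)
		total += oreq->xferred;
	return total;
}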
enum rbd_watch_state {
	RBD_WATCH_STATE_UNREGISTERED,
	RBD_WATCH_STATE_REGISTERED,
	RBD_WATCH_STATE_ERROR,
};

enum rbd_lock_state {
	RBD_LOCK_STATE_UNLOCKED,
	RBD_LOCK_STATE_LOCKED,
	RBD_LOCK_STATE_RELEASING,
};
/* WatchNotify::ClientId */
struct rbd_client_id {
	u64 gid;
	u64 handle;
};
struct rbd_mapping {
	u64			size;
	u64			features;
	bool			read_only;
};

struct rbd_device {
	int			dev_id;		/* blkdev unique id */

	int			major;		/* blkdev assigned major */
	int			minor;
	struct gendisk		*disk;		/* blkdev's gendisk and rq */

	u32			image_format;	/* Either 1 or 2 */
	struct rbd_client	*rbd_client;

	char			name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */

	spinlock_t		lock;		/* queue, flags, open_count */

	struct rbd_image_header	header;
	unsigned long		flags;		/* possibly lock protected */
	struct rbd_spec		*spec;
	struct rbd_options	*opts;
	char			*config_info;	/* add{,_single_major} string */

	struct ceph_object_id	header_oid;
	struct ceph_object_locator header_oloc;

	struct ceph_file_layout	layout;		/* used for all rbd requests */

	struct mutex		watch_mutex;
	enum rbd_watch_state	watch_state;
	struct ceph_osd_linger_request *watch_handle;
	u64			watch_cookie;
	struct delayed_work	watch_dwork;

	struct rw_semaphore	lock_rwsem;
	enum rbd_lock_state	lock_state;
	struct rbd_client_id	owner_cid;
	struct work_struct	acquired_lock_work;
	struct work_struct	released_lock_work;
	struct delayed_work	lock_dwork;
	struct work_struct	unlock_work;
	wait_queue_head_t	lock_waitq;

	struct workqueue_struct	*task_wq;

	struct rbd_spec		*parent_spec;
	u64			parent_overlap;
	atomic_t		parent_ref;
	struct rbd_device	*parent;

	/* Block layer tags. */
	struct blk_mq_tag_set	tag_set;

	/* protects updating the header */
	struct rw_semaphore	header_rwsem;

	struct rbd_mapping	mapping;

	struct list_head	node;

	/* sysfs related */
	struct device		dev;
	unsigned long		open_count;	/* protected by lock */
};
/*
 * Flag bits for rbd_dev->flags.  If atomicity is required,
 * rbd_dev->lock is used to protect access.
 *
 * Currently, only the "removing" flag (which is coupled with the
 * "open_count" field) requires atomic access.
 */
enum rbd_dev_flags {
	RBD_DEV_FLAG_EXISTS,	/* mapped snapshot has not been deleted */
	RBD_DEV_FLAG_REMOVING,	/* this mapping is being removed */
};
static DEFINE_MUTEX(client_mutex);	/* Serialize client creation */

static LIST_HEAD(rbd_dev_list);		/* devices */
static DEFINE_SPINLOCK(rbd_dev_list_lock);

static LIST_HEAD(rbd_client_list);	/* clients */
static DEFINE_SPINLOCK(rbd_client_list_lock);

/* Slab caches for frequently-allocated structures */

static struct kmem_cache	*rbd_img_request_cache;
static struct kmem_cache	*rbd_obj_request_cache;
static struct kmem_cache	*rbd_segment_name_cache;

static int rbd_major;
static DEFINE_IDA(rbd_dev_id_ida);

static struct workqueue_struct *rbd_wq;
/*
 * Default to false for now, as single-major requires >= 0.75 version of
 * userspace rbd utility.
 */
static bool single_major = false;
module_param(single_major, bool, S_IRUGO);
MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: false)");
static int rbd_img_request_submit(struct rbd_img_request *img_request);

static ssize_t rbd_add(struct bus_type *bus, const char *buf,
		       size_t count);
static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
			  size_t count);
static ssize_t rbd_add_single_major(struct bus_type *bus, const char *buf,
				    size_t count);
static ssize_t rbd_remove_single_major(struct bus_type *bus, const char *buf,
				       size_t count);
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth);
static void rbd_spec_put(struct rbd_spec *spec);
static int rbd_dev_id_to_minor(int dev_id)
{
	return dev_id << RBD_SINGLE_MAJOR_PART_SHIFT;
}

static int minor_to_rbd_dev_id(int minor)
{
	return minor >> RBD_SINGLE_MAJOR_PART_SHIFT;
}
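
/*
 * Worked example (illustrative only, not part of the driver): with
 * RBD_SINGLE_MAJOR_PART_SHIFT == 4, each device owns 1 << 4 == 16
 * minors (whole device plus partitions), so dev_id 3 maps to minor 48
 * and every minor in 48..63 maps back to dev_id 3.
 */
static inline bool rbd_example_minor_roundtrip(void)
{
	return rbd_dev_id_to_minor(3) == 48 &&
	       minor_to_rbd_dev_id(48 + 15) == 3;
}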
static bool rbd_is_lock_supported(struct rbd_device *rbd_dev)
{
	return (rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK) &&
	       rbd_dev->spec->snap_id == CEPH_NOSNAP &&
	       !rbd_dev->mapping.read_only;
}

static bool __rbd_is_lock_owner(struct rbd_device *rbd_dev)
{
	return rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED ||
	       rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING;
}

static bool rbd_is_lock_owner(struct rbd_device *rbd_dev)
{
	bool is_lock_owner;

	down_read(&rbd_dev->lock_rwsem);
	is_lock_owner = __rbd_is_lock_owner(rbd_dev);
	up_read(&rbd_dev->lock_rwsem);
	return is_lock_owner;
}
static BUS_ATTR(add, S_IWUSR, NULL, rbd_add);
static BUS_ATTR(remove, S_IWUSR, NULL, rbd_remove);
static BUS_ATTR(add_single_major, S_IWUSR, NULL, rbd_add_single_major);
static BUS_ATTR(remove_single_major, S_IWUSR, NULL, rbd_remove_single_major);

static struct attribute *rbd_bus_attrs[] = {
	&bus_attr_add.attr,
	&bus_attr_remove.attr,
	&bus_attr_add_single_major.attr,
	&bus_attr_remove_single_major.attr,
	NULL,
};

static umode_t rbd_bus_is_visible(struct kobject *kobj,
				  struct attribute *attr, int index)
{
	if (!single_major &&
	    (attr == &bus_attr_add_single_major.attr ||
	     attr == &bus_attr_remove_single_major.attr))
		return 0;

	return attr->mode;
}

static const struct attribute_group rbd_bus_group = {
	.attrs = rbd_bus_attrs,
	.is_visible = rbd_bus_is_visible,
};
__ATTRIBUTE_GROUPS(rbd_bus);

static struct bus_type rbd_bus_type = {
	.name		= "rbd",
	.bus_groups	= rbd_bus_groups,
};

static void rbd_root_dev_release(struct device *dev)
{
}

static struct device rbd_root_dev = {
	.init_name =    "rbd",
	.release =      rbd_root_dev_release,
};
static __printf(2, 3)
void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	if (!rbd_dev)
		printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
	else if (rbd_dev->disk)
		printk(KERN_WARNING "%s: %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_name)
		printk(KERN_WARNING "%s: image %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_id)
		printk(KERN_WARNING "%s: id %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
	else	/* punt */
		printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
			RBD_DRV_NAME, rbd_dev, &vaf);
	va_end(args);
}
#ifdef RBD_DEBUG
#define rbd_assert(expr)						\
		if (unlikely(!(expr))) {				\
			printk(KERN_ERR "\nAssertion failure in %s() "	\
						"at line %d:\n\n"	\
					"\trbd_assert(%s);\n\n",	\
					__func__, __LINE__, #expr);	\
			BUG();						\
		}
#else /* !RBD_DEBUG */
#  define rbd_assert(expr)	((void) 0)
#endif /* !RBD_DEBUG */
static void rbd_osd_copyup_callback(struct rbd_obj_request *obj_request);
static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);

static int rbd_dev_refresh(struct rbd_device *rbd_dev);
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
static int rbd_dev_header_info(struct rbd_device *rbd_dev);
static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev);
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id);
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
		u8 *order, u64 *snap_size);
static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
		u64 *snap_features);
static int rbd_open(struct block_device *bdev, fmode_t mode)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	bool removing = false;

	if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)
		return -EROFS;

	spin_lock_irq(&rbd_dev->lock);
	if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
		removing = true;
	else
		rbd_dev->open_count++;
	spin_unlock_irq(&rbd_dev->lock);
	if (removing)
		return -ENOENT;

	(void) get_device(&rbd_dev->dev);

	return 0;
}
static void rbd_release(struct gendisk *disk, fmode_t mode)
{
	struct rbd_device *rbd_dev = disk->private_data;
	unsigned long open_count_before;

	spin_lock_irq(&rbd_dev->lock);
	open_count_before = rbd_dev->open_count--;
	spin_unlock_irq(&rbd_dev->lock);
	rbd_assert(open_count_before > 0);

	put_device(&rbd_dev->dev);
}
static int rbd_ioctl_set_ro(struct rbd_device *rbd_dev, unsigned long arg)
{
	int ret = 0;
	int val;
	bool ro;
	bool ro_changed = false;

	/* get_user() may sleep, so call it before taking rbd_dev->lock */
	if (get_user(val, (int __user *)(arg)))
		return -EFAULT;

	ro = val ? true : false;
	/* A snapshot can't be written to */
	if (rbd_dev->spec->snap_id != CEPH_NOSNAP && !ro)
		return -EROFS;

	spin_lock_irq(&rbd_dev->lock);
	/* prevent others from opening this device */
	if (rbd_dev->open_count > 1) {
		ret = -EBUSY;
		goto out;
	}

	if (rbd_dev->mapping.read_only != ro) {
		rbd_dev->mapping.read_only = ro;
		ro_changed = true;
	}

out:
	spin_unlock_irq(&rbd_dev->lock);
	/* set_disk_ro() may sleep, so call it after releasing rbd_dev->lock */
	if (ret == 0 && ro_changed)
		set_disk_ro(rbd_dev->disk, ro ? 1 : 0);

	return ret;
}
static int rbd_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	int ret = 0;

	switch (cmd) {
	case BLKROSET:
		ret = rbd_ioctl_set_ro(rbd_dev, arg);
		break;
	default:
		ret = -ENOTTY;
	}

	return ret;
}

#ifdef CONFIG_COMPAT
static int rbd_compat_ioctl(struct block_device *bdev, fmode_t mode,
				unsigned int cmd, unsigned long arg)
{
	return rbd_ioctl(bdev, mode, cmd, arg);
}
#endif /* CONFIG_COMPAT */

static const struct block_device_operations rbd_bd_ops = {
	.owner			= THIS_MODULE,
	.open			= rbd_open,
	.release		= rbd_release,
	.ioctl			= rbd_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= rbd_compat_ioctl,
#endif
};
/*
 * Initialize an rbd client instance.  Success or not, this function
 * consumes ceph_opts.  Caller holds client_mutex.
 */
static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;
	int ret = -ENOMEM;

	dout("%s:\n", __func__);
	rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
	if (!rbdc)
		goto out_opt;

	kref_init(&rbdc->kref);
	INIT_LIST_HEAD(&rbdc->node);

	rbdc->client = ceph_create_client(ceph_opts, rbdc, 0, 0);
	if (IS_ERR(rbdc->client))
		goto out_rbdc;
	ceph_opts = NULL; /* Now rbdc->client is responsible for ceph_opts */

	ret = ceph_open_session(rbdc->client);
	if (ret < 0)
		goto out_client;

	spin_lock(&rbd_client_list_lock);
	list_add_tail(&rbdc->node, &rbd_client_list);
	spin_unlock(&rbd_client_list_lock);

	dout("%s: rbdc %p\n", __func__, rbdc);

	return rbdc;
out_client:
	ceph_destroy_client(rbdc->client);
out_rbdc:
	kfree(rbdc);
out_opt:
	if (ceph_opts)
		ceph_destroy_options(ceph_opts);
	dout("%s: error %d\n", __func__, ret);

	return ERR_PTR(ret);
}
static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
{
	kref_get(&rbdc->kref);

	return rbdc;
}

/*
 * Find a ceph client with specific addr and configuration.  If
 * found, bump its reference count.
 */
static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
{
	struct rbd_client *client_node;
	bool found = false;

	if (ceph_opts->flags & CEPH_OPT_NOSHARE)
		return NULL;

	spin_lock(&rbd_client_list_lock);
	list_for_each_entry(client_node, &rbd_client_list, node) {
		if (!ceph_compare_options(ceph_opts, client_node->client)) {
			__rbd_get_client(client_node);

			found = true;
			break;
		}
	}
	spin_unlock(&rbd_client_list_lock);

	return found ? client_node : NULL;
}
/*
 * (Per device) rbd map options
 */
enum {
	Opt_queue_depth,
	Opt_last_int,
	/* int args above */
	Opt_last_string,
	/* string args above */
	Opt_read_only,
	Opt_read_write,
	Opt_lock_on_read,
	Opt_err
};

static match_table_t rbd_opts_tokens = {
	{Opt_queue_depth, "queue_depth=%d"},
	/* int args above */
	/* string args above */
	{Opt_read_only, "read_only"},
	{Opt_read_only, "ro"},		/* Alternate spelling */
	{Opt_read_write, "read_write"},
	{Opt_read_write, "rw"},		/* Alternate spelling */
	{Opt_lock_on_read, "lock_on_read"},
	{Opt_err, NULL}
};

struct rbd_options {
	int	queue_depth;
	bool	read_only;
	bool	lock_on_read;
};

#define RBD_QUEUE_DEPTH_DEFAULT	BLKDEV_MAX_RQ
#define RBD_READ_ONLY_DEFAULT	false
#define RBD_LOCK_ON_READ_DEFAULT	false
static int parse_rbd_opts_token(char *c, void *private)
{
	struct rbd_options *rbd_opts = private;
	substring_t argstr[MAX_OPT_ARGS];
	int token, intval, ret;

	token = match_token(c, rbd_opts_tokens, argstr);
	if (token < Opt_last_int) {
		ret = match_int(&argstr[0], &intval);
		if (ret < 0) {
			pr_err("bad mount option arg (not int) at '%s'\n", c);
			return ret;
		}
		dout("got int token %d val %d\n", token, intval);
	} else if (token > Opt_last_int && token < Opt_last_string) {
		dout("got string token %d val %s\n", token, argstr[0].from);
	} else {
		dout("got token %d\n", token);
	}

	switch (token) {
	case Opt_queue_depth:
		if (intval < 1) {
			pr_err("queue_depth out of range\n");
			return -EINVAL;
		}
		rbd_opts->queue_depth = intval;
		break;
	case Opt_read_only:
		rbd_opts->read_only = true;
		break;
	case Opt_read_write:
		rbd_opts->read_only = false;
		break;
	case Opt_lock_on_read:
		rbd_opts->lock_on_read = true;
		break;
	default:
		/* libceph prints "bad option" msg */
		return -EINVAL;
	}

	return 0;
}
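
/*
 * Usage sketch (illustrative only, not part of the driver): each
 * comma-separated token of an rbd map option string is handed to
 * parse_rbd_opts_token() with a struct rbd_options as the private
 * pointer, e.g. for "queue_depth=128,lock_on_read":
 */
static inline int rbd_example_parse_opts(struct rbd_options *opts)
{
	char token1[] = "queue_depth=128";	/* sets opts->queue_depth */
	char token2[] = "lock_on_read";		/* sets opts->lock_on_read */
	int ret;

	ret = parse_rbd_opts_token(token1, opts);
	if (ret)
		return ret;
	return parse_rbd_opts_token(token2, opts);
}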
static char* obj_op_name(enum obj_operation_type op_type)
{
	switch (op_type) {
	case OBJ_OP_READ:
		return "read";
	case OBJ_OP_WRITE:
		return "write";
	case OBJ_OP_DISCARD:
		return "discard";
	default:
		return "???";
	}
}
/*
 * Get a ceph client with specific addr and configuration, if one does
 * not exist create it.  Either way, ceph_opts is consumed by this
 * function.
 */
static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;

	mutex_lock_nested(&client_mutex, SINGLE_DEPTH_NESTING);
	rbdc = rbd_client_find(ceph_opts);
	if (rbdc)	/* using an existing client */
		ceph_destroy_options(ceph_opts);
	else
		rbdc = rbd_client_create(ceph_opts);
	mutex_unlock(&client_mutex);

	return rbdc;
}
/*
 * Destroy ceph client
 *
 * Takes rbd_client_list_lock itself; the caller must not hold it.
 */
static void rbd_client_release(struct kref *kref)
{
	struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);

	dout("%s: rbdc %p\n", __func__, rbdc);
	spin_lock(&rbd_client_list_lock);
	list_del(&rbdc->node);
	spin_unlock(&rbd_client_list_lock);

	ceph_destroy_client(rbdc->client);
	kfree(rbdc);
}

/*
 * Drop reference to ceph client node. If it's not referenced anymore, release
 * it.
 */
static void rbd_put_client(struct rbd_client *rbdc)
{
	if (rbdc)
		kref_put(&rbdc->kref, rbd_client_release);
}
static bool rbd_image_format_valid(u32 image_format)
{
	return image_format == 1 || image_format == 2;
}

static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
{
	size_t size;
	u32 snap_count;

	/* The header has to start with the magic rbd header text */
	if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
		return false;

	/* The bio layer requires at least sector-sized I/O */

	if (ondisk->options.order < SECTOR_SHIFT)
		return false;

	/* If we use u64 in a few spots we may be able to loosen this */

	if (ondisk->options.order > 8 * sizeof (int) - 1)
		return false;

	/*
	 * The size of a snapshot header has to fit in a size_t, and
	 * that limits the number of snapshots.
	 */
	snap_count = le32_to_cpu(ondisk->snap_count);
	size = SIZE_MAX - sizeof (struct ceph_snap_context);
	if (snap_count > size / sizeof (__le64))
		return false;

	/*
	 * Not only that, but the size of the entire snapshot
	 * header must also be representable in a size_t.
	 */
	size -= snap_count * sizeof (__le64);
	if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
		return false;

	return true;
}
/*
 * Fill an rbd image header with information from the given format 1
 * on-disk header.
 */
static int rbd_header_from_disk(struct rbd_device *rbd_dev,
				 struct rbd_image_header_ondisk *ondisk)
{
	struct rbd_image_header *header = &rbd_dev->header;
	bool first_time = header->object_prefix == NULL;
	struct ceph_snap_context *snapc;
	char *object_prefix = NULL;
	char *snap_names = NULL;
	u64 *snap_sizes = NULL;
	u32 snap_count;
	int ret = -ENOMEM;
	u32 i;

	/* Allocate this now to avoid having to handle failure below */

	if (first_time) {
		size_t len;

		len = strnlen(ondisk->object_prefix,
				sizeof (ondisk->object_prefix));
		object_prefix = kmalloc(len + 1, GFP_KERNEL);
		if (!object_prefix)
			return -ENOMEM;
		memcpy(object_prefix, ondisk->object_prefix, len);
		object_prefix[len] = '\0';
	}

	/* Allocate the snapshot context and fill it in */

	snap_count = le32_to_cpu(ondisk->snap_count);
	snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
	if (!snapc)
		goto out_err;
	snapc->seq = le64_to_cpu(ondisk->snap_seq);
	if (snap_count) {
		struct rbd_image_snap_ondisk *snaps;
		u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);
		size_t size;

		/* We'll keep a copy of the snapshot names... */

		if (snap_names_len > (u64)SIZE_MAX)
			goto out_2big;
		snap_names = kmalloc(snap_names_len, GFP_KERNEL);
		if (!snap_names)
			goto out_err;

		/* ...as well as the array of their sizes. */

		size = snap_count * sizeof (*header->snap_sizes);
		snap_sizes = kmalloc(size, GFP_KERNEL);
		if (!snap_sizes)
			goto out_err;

		/*
		 * Copy the names, and fill in each snapshot's id
		 * and size.
		 *
		 * Note that rbd_dev_v1_header_info() guarantees the
		 * ondisk buffer we're working with has
		 * snap_names_len bytes beyond the end of the
		 * snapshot id array, so this memcpy() is safe.
		 */
		memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
		snaps = ondisk->snaps;
		for (i = 0; i < snap_count; i++) {
			snapc->snaps[i] = le64_to_cpu(snaps[i].id);
			snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
		}
	}

	/* We won't fail any more, fill in the header */

	if (first_time) {
		header->object_prefix = object_prefix;
		header->obj_order = ondisk->options.order;
		header->crypt_type = ondisk->options.crypt_type;
		header->comp_type = ondisk->options.comp_type;
		/* The rest aren't used for format 1 images */
		header->stripe_unit = 0;
		header->stripe_count = 0;
		header->features = 0;
	} else {
		ceph_put_snap_context(header->snapc);
		kfree(header->snap_names);
		kfree(header->snap_sizes);
	}

	/* The remaining fields always get updated (when we refresh) */

	header->image_size = le64_to_cpu(ondisk->image_size);
	header->snapc = snapc;
	header->snap_names = snap_names;
	header->snap_sizes = snap_sizes;

	return 0;
out_2big:
	ret = -EIO;
out_err:
	ceph_put_snap_context(snapc);
	kfree(snap_names);
	kfree(snap_sizes);
	kfree(object_prefix);

	return ret;
}
static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
{
	const char *snap_name;

	rbd_assert(which < rbd_dev->header.snapc->num_snaps);

	/* Skip over names until we find the one we are looking for */

	snap_name = rbd_dev->header.snap_names;
	while (which--)
		snap_name += strlen(snap_name) + 1;

	return kstrdup(snap_name, GFP_KERNEL);
}
/*
 * Snapshot id comparison function for use with qsort()/bsearch().
 * Note that result is for snapshots in *descending* order.
 */
static int snapid_compare_reverse(const void *s1, const void *s2)
{
	u64 snap_id1 = *(u64 *)s1;
	u64 snap_id2 = *(u64 *)s2;

	if (snap_id1 < snap_id2)
		return 1;
	return snap_id1 == snap_id2 ? 0 : -1;
}

/*
 * Search a snapshot context to see if the given snapshot id is
 * present.
 *
 * Returns the position of the snapshot id in the array if it's found,
 * or BAD_SNAP_INDEX otherwise.
 *
 * Note: The snapshot array is kept sorted (by the osd) in
 * reverse order, highest snapshot id first.
 */
static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	u64 *found;

	found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
				sizeof (snap_id), snapid_compare_reverse);

	return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
}
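
/*
 * Worked example (illustrative only, not part of the driver): because
 * the osd keeps the snapshot ids in descending order,
 * snapid_compare_reverse() inverts the usual comparison so bsearch()
 * still converges.  For snaps = { 40, 30, 10 }, looking up 30 returns
 * &snaps[1] (index 1); looking up 20 finds nothing.
 */
static inline u32 rbd_example_find_snap(void)
{
	u64 snaps[] = { 40, 30, 10 };	/* descending, as from the osd */
	u64 key = 30;
	u64 *found;

	found = bsearch(&key, snaps, ARRAY_SIZE(snaps), sizeof(key),
			snapid_compare_reverse);
	return found ? (u32)(found - snaps) : BAD_SNAP_INDEX;	/* 1 */
}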
static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id)
{
	u32 which;
	const char *snap_name;

	which = rbd_dev_snap_index(rbd_dev, snap_id);
	if (which == BAD_SNAP_INDEX)
		return ERR_PTR(-ENOENT);

	snap_name = _rbd_dev_v1_snap_name(rbd_dev, which);
	return snap_name ? snap_name : ERR_PTR(-ENOMEM);
}

static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
{
	if (snap_id == CEPH_NOSNAP)
		return RBD_SNAP_HEAD_NAME;

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (rbd_dev->image_format == 1)
		return rbd_dev_v1_snap_name(rbd_dev, snap_id);

	return rbd_dev_v2_snap_name(rbd_dev, snap_id);
}
static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u64 *snap_size)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_size = rbd_dev->header.image_size;
	} else if (rbd_dev->image_format == 1) {
		u32 which;

		which = rbd_dev_snap_index(rbd_dev, snap_id);
		if (which == BAD_SNAP_INDEX)
			return -ENOENT;

		*snap_size = rbd_dev->header.snap_sizes[which];
	} else {
		u64 size = 0;
		int ret;

		ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
		if (ret)
			return ret;

		*snap_size = size;
	}
	return 0;
}

static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
			u64 *snap_features)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_features = rbd_dev->header.features;
	} else if (rbd_dev->image_format == 1) {
		*snap_features = 0;	/* No features for format 1 */
	} else {
		u64 features = 0;
		int ret;

		ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, &features);
		if (ret)
			return ret;

		*snap_features = features;
	}
	return 0;
}
static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
{
	u64 snap_id = rbd_dev->spec->snap_id;
	u64 size = 0;
	u64 features = 0;
	int ret;

	ret = rbd_snap_size(rbd_dev, snap_id, &size);
	if (ret)
		return ret;
	ret = rbd_snap_features(rbd_dev, snap_id, &features);
	if (ret)
		return ret;

	rbd_dev->mapping.size = size;
	rbd_dev->mapping.features = features;

	return 0;
}

static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
{
	rbd_dev->mapping.size = 0;
	rbd_dev->mapping.features = 0;
}
static void rbd_segment_name_free(const char *name)
{
	/* The explicit cast here is needed to drop the const qualifier */

	kmem_cache_free(rbd_segment_name_cache, (void *)name);
}

static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
{
	char *name;
	u64 segment;
	int ret;
	char *name_format;

	name = kmem_cache_alloc(rbd_segment_name_cache, GFP_NOIO);
	if (!name)
		return NULL;
	segment = offset >> rbd_dev->header.obj_order;
	name_format = "%s.%012llx";
	if (rbd_dev->image_format == 2)
		name_format = "%s.%016llx";
	ret = snprintf(name, CEPH_MAX_OID_NAME_LEN + 1, name_format,
			rbd_dev->header.object_prefix, segment);
	if (ret < 0 || ret > CEPH_MAX_OID_NAME_LEN) {
		pr_err("error formatting segment name for #%llu (%d)\n",
			segment, ret);
		rbd_segment_name_free(name);
		name = NULL;
	}

	return name;
}

static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset)
{
	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

	return offset & (segment_size - 1);
}

static u64 rbd_segment_length(struct rbd_device *rbd_dev,
				u64 offset, u64 length)
{
	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

	offset &= segment_size - 1;
	rbd_assert(length <= U64_MAX - offset);
	if (offset + length > segment_size)
		length = segment_size - offset;

	return length;
}
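
/*
 * Worked example (illustrative only, not part of the driver): with the
 * common obj_order of 22 (4 MiB objects), image byte offset 10 MiB
 * lands in segment 10 MiB >> 22 == 2, at in-object offset
 * 10 MiB & (4 MiB - 1) == 2 MiB.  A 4 MiB request starting there is
 * trimmed by rbd_segment_length() to the 2 MiB remaining in that
 * object.  The resulting format 2 object name would look like
 * "rbd_data.101d6b8b4567.0000000000000002" (hypothetical prefix).
 */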
/*
 * returns the size of an object in the image
 */
static u64 rbd_obj_bytes(struct rbd_image_header *header)
{
	return 1 << header->obj_order;
}

/*
 * bio helpers
 */

static void bio_chain_put(struct bio *chain)
{
	struct bio *tmp;

	while (chain) {
		tmp = chain;
		chain = chain->bi_next;
		bio_put(tmp);
	}
}
/*
 * zeros a bio chain, starting at specific offset
 */
static void zero_bio_chain(struct bio *chain, int start_ofs)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	unsigned long flags;
	void *buf;
	int pos = 0;

	while (chain) {
		bio_for_each_segment(bv, chain, iter) {
			if (pos + bv.bv_len > start_ofs) {
				int remainder = max(start_ofs - pos, 0);
				buf = bvec_kmap_irq(&bv, &flags);
				memset(buf + remainder, 0,
				       bv.bv_len - remainder);
				flush_dcache_page(bv.bv_page);
				bvec_kunmap_irq(buf, &flags);
			}
			pos += bv.bv_len;
		}

		chain = chain->bi_next;
	}
}
/*
 * similar to zero_bio_chain(), zeros data defined by a page array,
 * starting at the given byte offset from the start of the array and
 * continuing up to the given end offset.  The pages array is
 * assumed to be big enough to hold all bytes up to the end.
 */
static void zero_pages(struct page **pages, u64 offset, u64 end)
{
	struct page **page = &pages[offset >> PAGE_SHIFT];

	rbd_assert(end > offset);
	rbd_assert(end - offset <= (u64)SIZE_MAX);
	while (offset < end) {
		size_t page_offset;
		size_t length;
		unsigned long flags;
		void *kaddr;

		page_offset = offset & ~PAGE_MASK;
		length = min_t(size_t, PAGE_SIZE - page_offset, end - offset);
		local_irq_save(flags);
		kaddr = kmap_atomic(*page);
		memset(kaddr + page_offset, 0, length);
		flush_dcache_page(*page);
		kunmap_atomic(kaddr);
		local_irq_restore(flags);

		offset += length;
		page++;
	}
}
/*
 * Clone a portion of a bio, starting at the given byte offset
 * and continuing for the number of bytes indicated.
 */
static struct bio *bio_clone_range(struct bio *bio_src,
					unsigned int offset,
					unsigned int len,
					gfp_t gfpmask)
{
	struct bio *bio;

	bio = bio_clone(bio_src, gfpmask);
	if (!bio)
		return NULL;	/* ENOMEM */

	bio_advance(bio, offset);
	bio->bi_iter.bi_size = len;

	return bio;
}
/*
 * Clone a portion of a bio chain, starting at the given byte offset
 * into the first bio in the source chain and continuing for the
 * number of bytes indicated.  The result is another bio chain of
 * exactly the given length, or a null pointer on error.
 *
 * The bio_src and offset parameters are both in-out.  On entry they
 * refer to the first source bio and the offset into that bio where
 * the start of data to be cloned is located.
 *
 * On return, bio_src is updated to refer to the bio in the source
 * chain that contains the first un-cloned byte, and *offset will
 * contain the offset of that byte within that bio.
 */
static struct bio *bio_chain_clone_range(struct bio **bio_src,
					unsigned int *offset,
					unsigned int len,
					gfp_t gfpmask)
{
	struct bio *bi = *bio_src;
	unsigned int off = *offset;
	struct bio *chain = NULL;
	struct bio **end;

	/* Build up a chain of clone bios up to the limit */

	if (!bi || off >= bi->bi_iter.bi_size || !len)
		return NULL;		/* Nothing to clone */

	end = &chain;
	while (len) {
		unsigned int bi_size;
		struct bio *bio;

		if (!bi) {
			rbd_warn(NULL, "bio_chain exhausted with %u left", len);
			goto out_err;	/* EINVAL; ran out of bio's */
		}
		bi_size = min_t(unsigned int, bi->bi_iter.bi_size - off, len);
		bio = bio_clone_range(bi, off, bi_size, gfpmask);
		if (!bio)
			goto out_err;	/* ENOMEM */

		*end = bio;
		end = &bio->bi_next;

		off += bi_size;
		if (off == bi->bi_iter.bi_size) {
			bi = bi->bi_next;
			off = 0;
		}
		len -= bi_size;
	}
	*bio_src = bi;
	*offset = off;

	return chain;
out_err:
	bio_chain_put(chain);

	return NULL;
}
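
/*
 * Usage sketch (illustrative only, not part of the driver): split the
 * first "split" bytes of a source bio chain off into a new chain.
 * Because bio_src and offset are in-out, a second call starting from
 * (next, off) picks up exactly where the first one left off, which is
 * how per-object clones are carved out of one block-layer request.
 */
static inline struct bio *rbd_example_split_chain(struct bio *src,
						  unsigned int split,
						  struct bio **next,
						  unsigned int *off)
{
	*next = src;
	*off = 0;
	return bio_chain_clone_range(next, off, split, GFP_NOIO);
}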
/*
 * The default/initial value for all object request flags is 0.  For
 * each flag, once its value is set to 1 it is never reset to 0
 * again.
 */
static void obj_request_img_data_set(struct rbd_obj_request *obj_request)
{
	if (test_and_set_bit(OBJ_REQ_IMG_DATA, &obj_request->flags)) {
		struct rbd_device *rbd_dev;

		rbd_dev = obj_request->img_request->rbd_dev;
		rbd_warn(rbd_dev, "obj_request %p already marked img_data",
			obj_request);
	}
}

static bool obj_request_img_data_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_IMG_DATA, &obj_request->flags) != 0;
}

static void obj_request_done_set(struct rbd_obj_request *obj_request)
{
	if (test_and_set_bit(OBJ_REQ_DONE, &obj_request->flags)) {
		struct rbd_device *rbd_dev = NULL;

		if (obj_request_img_data_test(obj_request))
			rbd_dev = obj_request->img_request->rbd_dev;
		rbd_warn(rbd_dev, "obj_request %p already marked done",
			obj_request);
	}
}

static bool obj_request_done_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_DONE, &obj_request->flags) != 0;
}

/*
 * This sets the KNOWN flag after (possibly) setting the EXISTS
 * flag.  The latter is set based on the "exists" value provided.
 *
 * Note that for our purposes once an object exists it never goes
 * away again.  It's possible that the responses from two existence
 * checks are separated by the creation of the target object, and
 * the first ("doesn't exist") response arrives *after* the second
 * ("does exist").  In that case we ignore the second one.
 */
static void obj_request_existence_set(struct rbd_obj_request *obj_request,
				bool exists)
{
	if (exists)
		set_bit(OBJ_REQ_EXISTS, &obj_request->flags);
	set_bit(OBJ_REQ_KNOWN, &obj_request->flags);
	smp_mb();
}

static bool obj_request_known_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_KNOWN, &obj_request->flags) != 0;
}

static bool obj_request_exists_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_EXISTS, &obj_request->flags) != 0;
}
static bool obj_request_overlaps_parent(struct rbd_obj_request *obj_request)
{
	struct rbd_device *rbd_dev = obj_request->img_request->rbd_dev;

	return obj_request->img_offset <
	    round_up(rbd_dev->parent_overlap, rbd_obj_bytes(&rbd_dev->header));
}
static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		atomic_read(&obj_request->kref.refcount));
	kref_get(&obj_request->kref);
}

static void rbd_obj_request_destroy(struct kref *kref);
static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request != NULL);
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		atomic_read(&obj_request->kref.refcount));
	kref_put(&obj_request->kref, rbd_obj_request_destroy);
}

static void rbd_img_request_get(struct rbd_img_request *img_request)
{
	dout("%s: img %p (was %d)\n", __func__, img_request,
	     atomic_read(&img_request->kref.refcount));
	kref_get(&img_request->kref);
}

static bool img_request_child_test(struct rbd_img_request *img_request);
static void rbd_parent_request_destroy(struct kref *kref);
static void rbd_img_request_destroy(struct kref *kref);
static void rbd_img_request_put(struct rbd_img_request *img_request)
{
	rbd_assert(img_request != NULL);
	dout("%s: img %p (was %d)\n", __func__, img_request,
		atomic_read(&img_request->kref.refcount));
	if (img_request_child_test(img_request))
		kref_put(&img_request->kref, rbd_parent_request_destroy);
	else
		kref_put(&img_request->kref, rbd_img_request_destroy);
}
static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->img_request == NULL);

	/* Image request now owns object's original reference */
	obj_request->img_request = img_request;
	obj_request->which = img_request->obj_request_count;
	rbd_assert(!obj_request_img_data_test(obj_request));
	obj_request_img_data_set(obj_request);
	rbd_assert(obj_request->which != BAD_WHICH);
	img_request->obj_request_count++;
	list_add_tail(&obj_request->links, &img_request->obj_requests);
	dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
		obj_request->which);
}

static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->which != BAD_WHICH);

	dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
		obj_request->which);
	list_del(&obj_request->links);
	rbd_assert(img_request->obj_request_count > 0);
	img_request->obj_request_count--;
	rbd_assert(obj_request->which == img_request->obj_request_count);
	obj_request->which = BAD_WHICH;
	rbd_assert(obj_request_img_data_test(obj_request));
	rbd_assert(obj_request->img_request == img_request);
	obj_request->img_request = NULL;
	obj_request->callback = NULL;
	rbd_obj_request_put(obj_request);
}
static bool obj_request_type_valid(enum obj_request_type type)
{
	switch (type) {
	case OBJ_REQUEST_NODATA:
	case OBJ_REQUEST_BIO:
	case OBJ_REQUEST_PAGES:
		return true;
	default:
		return false;
	}
}
static void rbd_obj_request_submit(struct rbd_obj_request *obj_request)
{
	struct ceph_osd_request *osd_req = obj_request->osd_req;

	dout("%s %p osd_req %p\n", __func__, obj_request, osd_req);
	ceph_osdc_start_request(osd_req->r_osdc, osd_req, false);
}

static void rbd_obj_request_end(struct rbd_obj_request *obj_request)
{
	dout("%s %p\n", __func__, obj_request);
	ceph_osdc_cancel_request(obj_request->osd_req);
}
/*
 * Wait for an object request to complete.  If interrupted, cancel the
 * underlying osd request.
 *
 * @timeout: in jiffies, 0 means "wait forever"
 */
static int __rbd_obj_request_wait(struct rbd_obj_request *obj_request,
				  unsigned long timeout)
{
	long ret;

	dout("%s %p\n", __func__, obj_request);
	ret = wait_for_completion_interruptible_timeout(
					&obj_request->completion,
					ceph_timeout_jiffies(timeout));
	if (ret <= 0) {
		if (ret == 0)
			ret = -ETIMEDOUT;
		rbd_obj_request_end(obj_request);
	} else {
		ret = 0;
	}

	dout("%s %p ret %d\n", __func__, obj_request, (int)ret);
	return ret;
}

static int rbd_obj_request_wait(struct rbd_obj_request *obj_request)
{
	return __rbd_obj_request_wait(obj_request, 0);
}
static void rbd_img_request_complete(struct rbd_img_request *img_request)
{
	dout("%s: img %p\n", __func__, img_request);

	/*
	 * If no error occurred, compute the aggregate transfer
	 * count for the image request.  We could instead use
	 * atomic64_cmpxchg() to update it as each object request
	 * completes; it's not clear offhand which approach is better.
	 */
	if (!img_request->result) {
		struct rbd_obj_request *obj_request;
		u64 xferred = 0;

		for_each_obj_request(img_request, obj_request)
			xferred += obj_request->xferred;
		img_request->xferred = xferred;
	}

	if (img_request->callback)
		img_request->callback(img_request);
	else
		rbd_img_request_put(img_request);
}
/*
 * The default/initial value for all image request flags is 0.  Each
 * is conditionally set to 1 at image request initialization time
 * and currently never changes thereafter.
 */
static void img_request_write_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_WRITE, &img_request->flags);
	smp_mb();
}

static bool img_request_write_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_WRITE, &img_request->flags) != 0;
}

/*
 * Set the discard flag when the img_request is a discard request
 */
static void img_request_discard_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_DISCARD, &img_request->flags);
	smp_mb();
}

static bool img_request_discard_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_DISCARD, &img_request->flags) != 0;
}

static void img_request_child_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_CHILD, &img_request->flags);
	smp_mb();
}

static void img_request_child_clear(struct rbd_img_request *img_request)
{
	clear_bit(IMG_REQ_CHILD, &img_request->flags);
	smp_mb();
}

static bool img_request_child_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_CHILD, &img_request->flags) != 0;
}

static void img_request_layered_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

static void img_request_layered_clear(struct rbd_img_request *img_request)
{
	clear_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

static bool img_request_layered_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
}
static enum obj_operation_type
rbd_img_request_op_type(struct rbd_img_request *img_request)
{
	if (img_request_write_test(img_request))
		return OBJ_OP_WRITE;
	else if (img_request_discard_test(img_request))
		return OBJ_OP_DISCARD;
	else
		return OBJ_OP_READ;
}
static void
rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
{
	u64 xferred = obj_request->xferred;
	u64 length = obj_request->length;

	dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
		obj_request, obj_request->img_request, obj_request->result,
		xferred, length);
	/*
	 * ENOENT means a hole in the image.  We zero-fill the entire
	 * length of the request.  A short read also implies zero-fill
	 * to the end of the request.  An error requires the whole
	 * length of the request to be reported finished with an error
	 * to the block layer.  In each case we update the xferred
	 * count to indicate the whole request was satisfied.
	 */
	rbd_assert(obj_request->type != OBJ_REQUEST_NODATA);
	if (obj_request->result == -ENOENT) {
		if (obj_request->type == OBJ_REQUEST_BIO)
			zero_bio_chain(obj_request->bio_list, 0);
		else
			zero_pages(obj_request->pages, 0, length);
		obj_request->result = 0;
	} else if (xferred < length && !obj_request->result) {
		if (obj_request->type == OBJ_REQUEST_BIO)
			zero_bio_chain(obj_request->bio_list, xferred);
		else
			zero_pages(obj_request->pages, xferred, length);
	}
	obj_request->xferred = length;
	obj_request_done_set(obj_request);
}
static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p cb %p\n", __func__, obj_request,
		obj_request->callback);
	if (obj_request->callback)
		obj_request->callback(obj_request);
	else
		complete_all(&obj_request->completion);
}
static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = NULL;
	struct rbd_device *rbd_dev = NULL;
	bool layered = false;

	if (obj_request_img_data_test(obj_request)) {
		img_request = obj_request->img_request;
		layered = img_request && img_request_layered_test(img_request);
		rbd_dev = img_request->rbd_dev;
	}

	dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
		obj_request, img_request, obj_request->result,
		obj_request->xferred, obj_request->length);
	if (layered && obj_request->result == -ENOENT &&
			obj_request->img_offset < rbd_dev->parent_overlap)
		rbd_img_parent_read(obj_request);
	else if (img_request)
		rbd_img_obj_request_read_callback(obj_request);
	else
		obj_request_done_set(obj_request);
}
static void rbd_osd_write_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p result %d %llu\n", __func__, obj_request,
		obj_request->result, obj_request->length);
	/*
	 * There is no such thing as a successful short write.  Set
	 * it to our originally-requested length.
	 */
	obj_request->xferred = obj_request->length;
	obj_request_done_set(obj_request);
}
static void rbd_osd_discard_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p result %d %llu\n", __func__, obj_request,
		obj_request->result, obj_request->length);
	/*
	 * There is no such thing as a successful short discard.  Set
	 * it to our originally-requested length.
	 */
	obj_request->xferred = obj_request->length;
	/* discarding a non-existent object is not a problem */
	if (obj_request->result == -ENOENT)
		obj_request->result = 0;
	obj_request_done_set(obj_request);
}
/*
 * For a simple stat call there's nothing to do.  We'll do more if
 * this is part of a write sequence for a layered image.
 */
static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);
	obj_request_done_set(obj_request);
}

static void rbd_osd_call_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);

	if (obj_request_img_data_test(obj_request))
		rbd_osd_copyup_callback(obj_request);
	else
		obj_request_done_set(obj_request);
}
static void rbd_osd_req_callback(struct ceph_osd_request *osd_req)
{
	struct rbd_obj_request *obj_request = osd_req->r_priv;
	u16 opcode;

	dout("%s: osd_req %p\n", __func__, osd_req);
	rbd_assert(osd_req == obj_request->osd_req);
	if (obj_request_img_data_test(obj_request)) {
		rbd_assert(obj_request->img_request);
		rbd_assert(obj_request->which != BAD_WHICH);
	} else {
		rbd_assert(obj_request->which == BAD_WHICH);
	}

	if (osd_req->r_result < 0)
		obj_request->result = osd_req->r_result;

	/*
	 * We support a 64-bit length, but ultimately it has to be
	 * passed to the block layer, which just supports a 32-bit
	 * length field.
	 */
	obj_request->xferred = osd_req->r_ops[0].outdata_len;
	rbd_assert(obj_request->xferred < (u64)UINT_MAX);

	opcode = osd_req->r_ops[0].op;
	switch (opcode) {
	case CEPH_OSD_OP_READ:
		rbd_osd_read_callback(obj_request);
		break;
	case CEPH_OSD_OP_SETALLOCHINT:
		rbd_assert(osd_req->r_ops[1].op == CEPH_OSD_OP_WRITE ||
			   osd_req->r_ops[1].op == CEPH_OSD_OP_WRITEFULL);
		/* fall through */
	case CEPH_OSD_OP_WRITE:
	case CEPH_OSD_OP_WRITEFULL:
		rbd_osd_write_callback(obj_request);
		break;
	case CEPH_OSD_OP_STAT:
		rbd_osd_stat_callback(obj_request);
		break;
	case CEPH_OSD_OP_DELETE:
	case CEPH_OSD_OP_TRUNCATE:
	case CEPH_OSD_OP_ZERO:
		rbd_osd_discard_callback(obj_request);
		break;
	case CEPH_OSD_OP_CALL:
		rbd_osd_call_callback(obj_request);
		break;
	default:
		rbd_warn(NULL, "%s: unsupported op %hu",
			obj_request->object_name, (unsigned short) opcode);
		break;
	}

	if (obj_request_done_test(obj_request))
		rbd_obj_request_complete(obj_request);
}
static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = obj_request->img_request;
	struct ceph_osd_request *osd_req = obj_request->osd_req;

	if (img_request)
		osd_req->r_snapid = img_request->snap_id;
}

static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
{
	struct ceph_osd_request *osd_req = obj_request->osd_req;

	osd_req->r_mtime = CURRENT_TIME;
	osd_req->r_data_offset = obj_request->offset;
}
/*
 * Create an osd request.  A read request has one osd op (read).
 * A write request has either one (watch) or two (hint+write) osd ops.
 * (All rbd data writes are prefixed with an allocation hint op, but
 * technically osd watch is a write request, hence this distinction.)
 */
static struct ceph_osd_request *rbd_osd_req_create(
					struct rbd_device *rbd_dev,
					enum obj_operation_type op_type,
					unsigned int num_ops,
					struct rbd_obj_request *obj_request)
{
	struct ceph_snap_context *snapc = NULL;
	struct ceph_osd_client *osdc;
	struct ceph_osd_request *osd_req;

	if (obj_request_img_data_test(obj_request) &&
		(op_type == OBJ_OP_DISCARD || op_type == OBJ_OP_WRITE)) {
		struct rbd_img_request *img_request = obj_request->img_request;
		if (op_type == OBJ_OP_WRITE) {
			rbd_assert(img_request_write_test(img_request));
		} else {
			rbd_assert(img_request_discard_test(img_request));
		}
		snapc = img_request->snapc;
	}

	rbd_assert(num_ops == 1 || ((op_type == OBJ_OP_WRITE) && num_ops == 2));

	/* Allocate and initialize the request, for the num_ops ops */

	osdc = &rbd_dev->rbd_client->client->osdc;
	osd_req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false,
					  GFP_NOIO);
	if (!osd_req)
		goto fail;

	if (op_type == OBJ_OP_WRITE || op_type == OBJ_OP_DISCARD)
		osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
	else
		osd_req->r_flags = CEPH_OSD_FLAG_READ;

	osd_req->r_callback = rbd_osd_req_callback;
	osd_req->r_priv = obj_request;

	osd_req->r_base_oloc.pool = rbd_dev->layout.pool_id;
	if (ceph_oid_aprintf(&osd_req->r_base_oid, GFP_NOIO, "%s",
			     obj_request->object_name))
		goto fail;

	if (ceph_osdc_alloc_messages(osd_req, GFP_NOIO))
		goto fail;

	return osd_req;

fail:
	ceph_osdc_put_request(osd_req);
	return NULL;
}
/*
 * Create a copyup osd request based on the information in the object
 * request supplied.  A copyup request has two or three osd ops, a
 * copyup method call, potentially a hint op, and a write or truncate
 * or zero op.
 */
static struct ceph_osd_request *
rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	struct ceph_snap_context *snapc;
	struct rbd_device *rbd_dev;
	struct ceph_osd_client *osdc;
	struct ceph_osd_request *osd_req;
	int num_osd_ops = 3;

	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;
	rbd_assert(img_request);
	rbd_assert(img_request_write_test(img_request) ||
			img_request_discard_test(img_request));

	if (img_request_discard_test(img_request))
		num_osd_ops = 2;

	/* Allocate and initialize the request, for all the ops */

	snapc = img_request->snapc;
	rbd_dev = img_request->rbd_dev;
	osdc = &rbd_dev->rbd_client->client->osdc;
	osd_req = ceph_osdc_alloc_request(osdc, snapc, num_osd_ops,
						false, GFP_NOIO);
	if (!osd_req)
		goto fail;

	osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
	osd_req->r_callback = rbd_osd_req_callback;
	osd_req->r_priv = obj_request;

	osd_req->r_base_oloc.pool = rbd_dev->layout.pool_id;
	if (ceph_oid_aprintf(&osd_req->r_base_oid, GFP_NOIO, "%s",
			     obj_request->object_name))
		goto fail;

	if (ceph_osdc_alloc_messages(osd_req, GFP_NOIO))
		goto fail;

	return osd_req;

fail:
	ceph_osdc_put_request(osd_req);
	return NULL;
}

static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
{
	ceph_osdc_put_request(osd_req);
}
/* object_name is assumed to be a non-null pointer and NUL-terminated */

static struct rbd_obj_request *rbd_obj_request_create(const char *object_name,
						u64 offset, u64 length,
						enum obj_request_type type)
{
	struct rbd_obj_request *obj_request;
	size_t size;
	char *name;

	rbd_assert(obj_request_type_valid(type));

	size = strlen(object_name) + 1;
	name = kmalloc(size, GFP_NOIO);
	if (!name)
		return NULL;

	obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_NOIO);
	if (!obj_request) {
		kfree(name);
		return NULL;
	}

	obj_request->object_name = memcpy(name, object_name, size);
	obj_request->offset = offset;
	obj_request->length = length;
	obj_request->flags = 0;
	obj_request->which = BAD_WHICH;
	obj_request->type = type;
	INIT_LIST_HEAD(&obj_request->links);
	init_completion(&obj_request->completion);
	kref_init(&obj_request->kref);

	dout("%s: \"%s\" %llu/%llu %d -> obj %p\n", __func__, object_name,
		offset, length, (int)type, obj_request);

	return obj_request;
}
static void rbd_obj_request_destroy(struct kref *kref)
{
	struct rbd_obj_request *obj_request;

	obj_request = container_of(kref, struct rbd_obj_request, kref);

	dout("%s: obj %p\n", __func__, obj_request);

	rbd_assert(obj_request->img_request == NULL);
	rbd_assert(obj_request->which == BAD_WHICH);

	if (obj_request->osd_req)
		rbd_osd_req_destroy(obj_request->osd_req);

	rbd_assert(obj_request_type_valid(obj_request->type));
	switch (obj_request->type) {
	case OBJ_REQUEST_NODATA:
		break;		/* Nothing to do */
	case OBJ_REQUEST_BIO:
		if (obj_request->bio_list)
			bio_chain_put(obj_request->bio_list);
		break;
	case OBJ_REQUEST_PAGES:
		if (obj_request->pages)
			ceph_release_page_vector(obj_request->pages,
						obj_request->page_count);
		break;
	}

	kfree(obj_request->object_name);
	obj_request->object_name = NULL;
	kmem_cache_free(rbd_obj_request_cache, obj_request);
}
/* It's OK to call this for a device with no parent */

static void rbd_spec_put(struct rbd_spec *spec);
static void rbd_dev_unparent(struct rbd_device *rbd_dev)
{
	rbd_dev_remove_parent(rbd_dev);
	rbd_spec_put(rbd_dev->parent_spec);
	rbd_dev->parent_spec = NULL;
	rbd_dev->parent_overlap = 0;
}
/*
 * Parent image reference counting is used to determine when an
 * image's parent fields can be safely torn down--after there are no
 * more in-flight requests to the parent image.  When the last
 * reference is dropped, cleaning them up is safe.
 */
static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
{
	int counter;

	if (!rbd_dev->parent_spec)
		return;

	counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
	if (counter > 0)
		return;

	/* Last reference; clean up parent data structures */

	if (!counter)
		rbd_dev_unparent(rbd_dev);
	else
		rbd_warn(rbd_dev, "parent reference underflow");
}
/*
 * If an image has a non-zero parent overlap, get a reference to its
 * parent.
 *
 * Returns true if the rbd device has a parent with a non-zero
 * overlap and a reference for it was successfully taken, or
 * false otherwise.
 */
static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
{
	int counter = 0;

	if (!rbd_dev->parent_spec)
		return false;

	down_read(&rbd_dev->header_rwsem);
	if (rbd_dev->parent_overlap)
		counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
	up_read(&rbd_dev->header_rwsem);

	if (counter < 0)
		rbd_warn(rbd_dev, "parent reference overflow");

	return counter > 0;
}
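
/*
 * Usage sketch (illustrative only, not part of the driver): the
 * expected pairing.  A caller that successfully takes a parent
 * reference with rbd_dev_parent_get() must drop it with
 * rbd_dev_parent_put() once the layered I/O that needed the parent
 * has completed; only then can the parent fields be torn down.
 */
static inline void rbd_example_layered_io(struct rbd_device *rbd_dev)
{
	if (rbd_dev_parent_get(rbd_dev)) {
		/* ... issue reads that may fall through to the parent ... */
		rbd_dev_parent_put(rbd_dev);
	}
	/* else: no parent, or it was torn down; treat as unlayered */
}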
/*
 * Caller is responsible for filling in the list of object requests
 * that comprises the image request, and the Linux request pointer
 * (if there is one).
 */
static struct rbd_img_request *rbd_img_request_create(
					struct rbd_device *rbd_dev,
					u64 offset, u64 length,
					enum obj_operation_type op_type,
					struct ceph_snap_context *snapc)
{
	struct rbd_img_request *img_request;

	img_request = kmem_cache_alloc(rbd_img_request_cache, GFP_NOIO);
	if (!img_request)
		return NULL;

	img_request->rq = NULL;
	img_request->rbd_dev = rbd_dev;
	img_request->offset = offset;
	img_request->length = length;
	img_request->flags = 0;
	if (op_type == OBJ_OP_DISCARD) {
		img_request_discard_set(img_request);
		img_request->snapc = snapc;
	} else if (op_type == OBJ_OP_WRITE) {
		img_request_write_set(img_request);
		img_request->snapc = snapc;
	} else {
		img_request->snap_id = rbd_dev->spec->snap_id;
	}
	if (rbd_dev_parent_get(rbd_dev))
		img_request_layered_set(img_request);
	spin_lock_init(&img_request->completion_lock);
	img_request->next_completion = 0;
	img_request->callback = NULL;
	img_request->result = 0;
	img_request->obj_request_count = 0;
	INIT_LIST_HEAD(&img_request->obj_requests);
	kref_init(&img_request->kref);

	dout("%s: rbd_dev %p %s %llu/%llu -> img %p\n", __func__, rbd_dev,
		obj_op_name(op_type), offset, length, img_request);

	return img_request;
}
static void rbd_img_request_destroy(struct kref *kref)
{
	struct rbd_img_request *img_request;
	struct rbd_obj_request *obj_request;
	struct rbd_obj_request *next_obj_request;

	img_request = container_of(kref, struct rbd_img_request, kref);

	dout("%s: img %p\n", __func__, img_request);

	for_each_obj_request_safe(img_request, obj_request, next_obj_request)
		rbd_img_obj_request_del(img_request, obj_request);
	rbd_assert(img_request->obj_request_count == 0);

	if (img_request_layered_test(img_request)) {
		img_request_layered_clear(img_request);
		rbd_dev_parent_put(img_request->rbd_dev);
	}

	if (img_request_write_test(img_request) ||
		img_request_discard_test(img_request))
		ceph_put_snap_context(img_request->snapc);

	kmem_cache_free(rbd_img_request_cache, img_request);
}
static struct rbd_img_request *rbd_parent_request_create(
					struct rbd_obj_request *obj_request,
					u64 img_offset, u64 length)
{
	struct rbd_img_request *parent_request;
	struct rbd_device *rbd_dev;

	rbd_assert(obj_request->img_request);
	rbd_dev = obj_request->img_request->rbd_dev;

	parent_request = rbd_img_request_create(rbd_dev->parent, img_offset,
						length, OBJ_OP_READ, NULL);
	if (!parent_request)
		return NULL;

	img_request_child_set(parent_request);
	rbd_obj_request_get(obj_request);
	parent_request->obj_request = obj_request;

	return parent_request;
}
2313 static void rbd_parent_request_destroy(struct kref *kref)
2315 struct rbd_img_request *parent_request;
2316 struct rbd_obj_request *orig_request;
2318 parent_request = container_of(kref, struct rbd_img_request, kref);
2319 orig_request = parent_request->obj_request;
2321 parent_request->obj_request = NULL;
2322 rbd_obj_request_put(orig_request);
2323 img_request_child_clear(parent_request);
2325 rbd_img_request_destroy(kref);
2328 static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
2330 struct rbd_img_request *img_request;
2331 unsigned int xferred;
2335 rbd_assert(obj_request_img_data_test(obj_request));
2336 img_request = obj_request->img_request;
2338 rbd_assert(obj_request->xferred <= (u64)UINT_MAX);
2339 xferred = (unsigned int)obj_request->xferred;
2340 result = obj_request->result;
2342 struct rbd_device *rbd_dev = img_request->rbd_dev;
2343 enum obj_operation_type op_type;
2345 if (img_request_discard_test(img_request))
2346 op_type = OBJ_OP_DISCARD;
2347 else if (img_request_write_test(img_request))
2348 op_type = OBJ_OP_WRITE;
2350 op_type = OBJ_OP_READ;
2352 rbd_warn(rbd_dev, "%s %llx at %llx (%llx)",
2353 obj_op_name(op_type), obj_request->length,
2354 obj_request->img_offset, obj_request->offset);
2355 rbd_warn(rbd_dev, " result %d xferred %x",
2357 if (!img_request->result)
2358 img_request->result = result;
2360 * Need to end I/O on the entire obj_request worth of
2361 * bytes in case of error.
2363 xferred = obj_request->length;
2366 /* Image object requests don't own their page array */
2368 if (obj_request->type == OBJ_REQUEST_PAGES) {
2369 obj_request->pages = NULL;
2370 obj_request->page_count = 0;
2373 if (img_request_child_test(img_request)) {
2374 rbd_assert(img_request->obj_request != NULL);
2375 more = obj_request->which < img_request->obj_request_count - 1;
2377 rbd_assert(img_request->rq != NULL);
2379 more = blk_update_request(img_request->rq, result, xferred);
2381 __blk_mq_end_request(img_request->rq, result);
2387 static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
2389 struct rbd_img_request *img_request;
2390 u32 which = obj_request->which;
2393 rbd_assert(obj_request_img_data_test(obj_request));
2394 img_request = obj_request->img_request;
2396 dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
2397 rbd_assert(img_request != NULL);
2398 rbd_assert(img_request->obj_request_count > 0);
2399 rbd_assert(which != BAD_WHICH);
2400 rbd_assert(which < img_request->obj_request_count);
2402 spin_lock_irq(&img_request->completion_lock);
2403 if (which != img_request->next_completion)
2406 for_each_obj_request_from(img_request, obj_request) {
2408 rbd_assert(which < img_request->obj_request_count);
2410 if (!obj_request_done_test(obj_request))
2412 more = rbd_img_obj_end_request(obj_request);
2416 rbd_assert(more ^ (which == img_request->obj_request_count));
2417 img_request->next_completion = which;
2419 spin_unlock_irq(&img_request->completion_lock);
2420 rbd_img_request_put(img_request);
2423 rbd_img_request_complete(img_request);
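/*
 * Illustrative sketch (not part of this driver): the in-order
 * completion walk above. Object requests can finish out of order,
 * but next_completion only advances over a contiguous prefix of
 * finished requests, so the block layer always sees completed bytes
 * in submission order. Hypothetical array-based stand-in.
 */
static u32 advance_completion(const bool *done, u32 next, u32 count)
{
	while (next < count && done[next])
		next++;		/* complete each finished request in order */
	return next;
}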
2427 * Add individual osd ops to the given ceph_osd_request and prepare
2428 * them for submission. num_ops is the current number of
2429  * osd operations already assigned to the object request.
2431 static void rbd_img_obj_request_fill(struct rbd_obj_request *obj_request,
2432 struct ceph_osd_request *osd_request,
2433 enum obj_operation_type op_type,
2434 unsigned int num_ops)
2436 struct rbd_img_request *img_request = obj_request->img_request;
2437 struct rbd_device *rbd_dev = img_request->rbd_dev;
2438 u64 object_size = rbd_obj_bytes(&rbd_dev->header);
2439 u64 offset = obj_request->offset;
2440 u64 length = obj_request->length;
2444 if (op_type == OBJ_OP_DISCARD) {
2445 if (!offset && length == object_size &&
2446 (!img_request_layered_test(img_request) ||
2447 !obj_request_overlaps_parent(obj_request))) {
2448 opcode = CEPH_OSD_OP_DELETE;
2449 } else if ((offset + length == object_size)) {
2450 opcode = CEPH_OSD_OP_TRUNCATE;
2452 down_read(&rbd_dev->header_rwsem);
2453 img_end = rbd_dev->header.image_size;
2454 up_read(&rbd_dev->header_rwsem);
2456 if (obj_request->img_offset + length == img_end)
2457 opcode = CEPH_OSD_OP_TRUNCATE;
2459 opcode = CEPH_OSD_OP_ZERO;
2461 } else if (op_type == OBJ_OP_WRITE) {
2462 if (!offset && length == object_size)
2463 opcode = CEPH_OSD_OP_WRITEFULL;
2465 opcode = CEPH_OSD_OP_WRITE;
2466 osd_req_op_alloc_hint_init(osd_request, num_ops,
2467 object_size, object_size);
2470 opcode = CEPH_OSD_OP_READ;
2473 if (opcode == CEPH_OSD_OP_DELETE)
2474 osd_req_op_init(osd_request, num_ops, opcode, 0);
2476 osd_req_op_extent_init(osd_request, num_ops, opcode,
2477 offset, length, 0, 0);
2479 if (obj_request->type == OBJ_REQUEST_BIO)
2480 osd_req_op_extent_osd_data_bio(osd_request, num_ops,
2481 obj_request->bio_list, length);
2482 else if (obj_request->type == OBJ_REQUEST_PAGES)
2483 osd_req_op_extent_osd_data_pages(osd_request, num_ops,
2484 obj_request->pages, length,
2485 offset & ~PAGE_MASK, false, false);
2487 /* Discards are also writes */
2488 if (op_type == OBJ_OP_WRITE || op_type == OBJ_OP_DISCARD)
2489 rbd_osd_req_format_write(obj_request);
2491 rbd_osd_req_format_read(obj_request);
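/*
 * Illustrative sketch (not part of this driver): the discard opcode
 * choice above distilled into a pure function. The boolean
 * parameters are hypothetical stand-ins for the offset/length tests
 * against the object size and the image size.
 */
static u16 discard_opcode(bool whole_object, bool shares_parent_data,
			  bool ends_at_object_end, bool ends_at_image_end)
{
	if (whole_object && !shares_parent_data)
		return CEPH_OSD_OP_DELETE;	/* drop the object outright */
	if (ends_at_object_end || ends_at_image_end)
		return CEPH_OSD_OP_TRUNCATE;	/* shrink the object */
	return CEPH_OSD_OP_ZERO;		/* punch a hole in the middle */
}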
2495 * Split up an image request into one or more object requests, each
2496 * to a different object. The "type" parameter indicates whether
2497 * "data_desc" is the pointer to the head of a list of bio
2498 * structures, or the base of a page array. In either case this
2499 * function assumes data_desc describes memory sufficient to hold
2500 * all data described by the image request.
2502 static int rbd_img_request_fill(struct rbd_img_request *img_request,
2503 enum obj_request_type type,
2506 struct rbd_device *rbd_dev = img_request->rbd_dev;
2507 struct rbd_obj_request *obj_request = NULL;
2508 struct rbd_obj_request *next_obj_request;
2509 struct bio *bio_list = NULL;
2510 unsigned int bio_offset = 0;
2511 struct page **pages = NULL;
2512 enum obj_operation_type op_type;
2516 dout("%s: img %p type %d data_desc %p\n", __func__, img_request,
2517 (int)type, data_desc);
2519 img_offset = img_request->offset;
2520 resid = img_request->length;
2521 rbd_assert(resid > 0);
2522 op_type = rbd_img_request_op_type(img_request);
2524 if (type == OBJ_REQUEST_BIO) {
2525 bio_list = data_desc;
2526 rbd_assert(img_offset ==
2527 bio_list->bi_iter.bi_sector << SECTOR_SHIFT);
2528 } else if (type == OBJ_REQUEST_PAGES) {
2533 struct ceph_osd_request *osd_req;
2534 const char *object_name;
2538 object_name = rbd_segment_name(rbd_dev, img_offset);
2541 offset = rbd_segment_offset(rbd_dev, img_offset);
2542 length = rbd_segment_length(rbd_dev, img_offset, resid);
2543 obj_request = rbd_obj_request_create(object_name,
2544 offset, length, type);
2545 /* object request has its own copy of the object name */
2546 rbd_segment_name_free(object_name);
2551 * set obj_request->img_request before creating the
2552 * osd_request so that it gets the right snapc
2554 rbd_img_obj_request_add(img_request, obj_request);
2556 if (type == OBJ_REQUEST_BIO) {
2557 unsigned int clone_size;
2559 rbd_assert(length <= (u64)UINT_MAX);
2560 clone_size = (unsigned int)length;
2561 obj_request->bio_list =
2562 bio_chain_clone_range(&bio_list,
2566 if (!obj_request->bio_list)
2568 } else if (type == OBJ_REQUEST_PAGES) {
2569 unsigned int page_count;
2571 obj_request->pages = pages;
2572 page_count = (u32)calc_pages_for(offset, length);
2573 obj_request->page_count = page_count;
2574 if ((offset + length) & ~PAGE_MASK)
2575 page_count--; /* more on last page */
2576 pages += page_count;
2579 osd_req = rbd_osd_req_create(rbd_dev, op_type,
2580 (op_type == OBJ_OP_WRITE) ? 2 : 1,
2585 obj_request->osd_req = osd_req;
2586 obj_request->callback = rbd_img_obj_callback;
2587 obj_request->img_offset = img_offset;
2589 rbd_img_obj_request_fill(obj_request, osd_req, op_type, 0);
2591 rbd_img_request_get(img_request);
2593 img_offset += length;
2600 for_each_obj_request_safe(img_request, obj_request, next_obj_request)
2601 rbd_img_obj_request_del(img_request, obj_request);
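/*
 * Illustrative sketch (not part of this driver): the per-object
 * segment math the loop above depends on, assuming the plain layout
 * where an image is striped across objects of size 1 << obj_order
 * (4M by default) with no fancy striping. Hypothetical helper.
 */
static void segment_span(u64 img_offset, u64 resid, u8 obj_order,
			 u64 *offset, u64 *length)
{
	u64 seg_size = 1ULL << obj_order;

	*offset = img_offset & (seg_size - 1);	  /* offset within object */
	*length = min(resid, seg_size - *offset); /* stop at object end */
}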
2607 rbd_osd_copyup_callback(struct rbd_obj_request *obj_request)
2609 struct rbd_img_request *img_request;
2610 struct rbd_device *rbd_dev;
2611 struct page **pages;
2614 dout("%s: obj %p\n", __func__, obj_request);
2616 rbd_assert(obj_request->type == OBJ_REQUEST_BIO ||
2617 obj_request->type == OBJ_REQUEST_NODATA);
2618 rbd_assert(obj_request_img_data_test(obj_request));
2619 img_request = obj_request->img_request;
2620 rbd_assert(img_request);
2622 rbd_dev = img_request->rbd_dev;
2623 rbd_assert(rbd_dev);
2625 pages = obj_request->copyup_pages;
2626 rbd_assert(pages != NULL);
2627 obj_request->copyup_pages = NULL;
2628 page_count = obj_request->copyup_page_count;
2629 rbd_assert(page_count);
2630 obj_request->copyup_page_count = 0;
2631 ceph_release_page_vector(pages, page_count);
2634 * We want the transfer count to reflect the size of the
2635 * original write request. There is no such thing as a
2636 * successful short write, so if the request was successful
2637 * we can just set it to the originally-requested length.
2639 if (!obj_request->result)
2640 obj_request->xferred = obj_request->length;
2642 obj_request_done_set(obj_request);
2646 rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
2648 struct rbd_obj_request *orig_request;
2649 struct ceph_osd_request *osd_req;
2650 struct rbd_device *rbd_dev;
2651 struct page **pages;
2652 enum obj_operation_type op_type;
2657 rbd_assert(img_request_child_test(img_request));
2659 /* First get what we need from the image request */
2661 pages = img_request->copyup_pages;
2662 rbd_assert(pages != NULL);
2663 img_request->copyup_pages = NULL;
2664 page_count = img_request->copyup_page_count;
2665 rbd_assert(page_count);
2666 img_request->copyup_page_count = 0;
2668 orig_request = img_request->obj_request;
2669 rbd_assert(orig_request != NULL);
2670 rbd_assert(obj_request_type_valid(orig_request->type));
2671 img_result = img_request->result;
2672 parent_length = img_request->length;
2673 rbd_assert(parent_length == img_request->xferred);
2674 rbd_img_request_put(img_request);
2676 rbd_assert(orig_request->img_request);
2677 rbd_dev = orig_request->img_request->rbd_dev;
2678 rbd_assert(rbd_dev);
2681 * If the overlap has become 0 (most likely because the
2682 * image has been flattened) we need to free the pages
2683 * and re-submit the original write request.
2685 if (!rbd_dev->parent_overlap) {
2686 ceph_release_page_vector(pages, page_count);
2687 rbd_obj_request_submit(orig_request);
2695  * The original osd request is of no use to us any more.
2696 * We need a new one that can hold the three ops in a copyup
2697 * request. Allocate the new copyup osd request for the
2698 * original request, and release the old one.
2700 img_result = -ENOMEM;
2701 osd_req = rbd_osd_req_create_copyup(orig_request);
2704 rbd_osd_req_destroy(orig_request->osd_req);
2705 orig_request->osd_req = osd_req;
2706 orig_request->copyup_pages = pages;
2707 orig_request->copyup_page_count = page_count;
2709 /* Initialize the copyup op */
2711 osd_req_op_cls_init(osd_req, 0, CEPH_OSD_OP_CALL, "rbd", "copyup");
2712 osd_req_op_cls_request_data_pages(osd_req, 0, pages, parent_length, 0,
2715 /* Add the other op(s) */
2717 op_type = rbd_img_request_op_type(orig_request->img_request);
2718 rbd_img_obj_request_fill(orig_request, osd_req, op_type, 1);
2720 /* All set, send it off. */
2722 rbd_obj_request_submit(orig_request);
2726 /* Record the error code and complete the request */
2728 orig_request->result = img_result;
2729 orig_request->xferred = 0;
2730 obj_request_done_set(orig_request);
2731 rbd_obj_request_complete(orig_request);
2735 * Read from the parent image the range of data that covers the
2736 * entire target of the given object request. This is used for
2737 * satisfying a layered image write request when the target of an
2738 * object request from the image request does not exist.
2740 * A page array big enough to hold the returned data is allocated
2741 * and supplied to rbd_img_request_fill() as the "data descriptor."
2742 * When the read completes, this page array will be transferred to
2743 * the original object request for the copyup operation.
2745 * If an error occurs, record it as the result of the original
2746 * object request and mark it done so it gets completed.
2748 static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
2750 struct rbd_img_request *img_request = NULL;
2751 struct rbd_img_request *parent_request = NULL;
2752 struct rbd_device *rbd_dev;
2755 struct page **pages = NULL;
2759 rbd_assert(obj_request_img_data_test(obj_request));
2760 rbd_assert(obj_request_type_valid(obj_request->type));
2762 img_request = obj_request->img_request;
2763 rbd_assert(img_request != NULL);
2764 rbd_dev = img_request->rbd_dev;
2765 rbd_assert(rbd_dev->parent != NULL);
2768 * Determine the byte range covered by the object in the
2769 * child image to which the original request was to be sent.
2771 img_offset = obj_request->img_offset - obj_request->offset;
2772 length = (u64)1 << rbd_dev->header.obj_order;
2775 * There is no defined parent data beyond the parent
2776  * overlap, so limit what we read at that boundary if necessary.
2779 if (img_offset + length > rbd_dev->parent_overlap) {
2780 rbd_assert(img_offset < rbd_dev->parent_overlap);
2781 length = rbd_dev->parent_overlap - img_offset;
2785 * Allocate a page array big enough to receive the data read
2788 page_count = (u32)calc_pages_for(0, length);
2789 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2790 if (IS_ERR(pages)) {
2791 result = PTR_ERR(pages);
2797 parent_request = rbd_parent_request_create(obj_request,
2798 img_offset, length);
2799 if (!parent_request)
2802 result = rbd_img_request_fill(parent_request, OBJ_REQUEST_PAGES, pages);
2805 parent_request->copyup_pages = pages;
2806 parent_request->copyup_page_count = page_count;
2808 parent_request->callback = rbd_img_obj_parent_read_full_callback;
2809 result = rbd_img_request_submit(parent_request);
2813 parent_request->copyup_pages = NULL;
2814 parent_request->copyup_page_count = 0;
2815 parent_request->obj_request = NULL;
2816 rbd_obj_request_put(obj_request);
2819 ceph_release_page_vector(pages, page_count);
2821 rbd_img_request_put(parent_request);
2822 obj_request->result = result;
2823 obj_request->xferred = 0;
2824 obj_request_done_set(obj_request);
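/*
 * Illustrative sketch (not part of this driver): the read range
 * computed above. It starts at the object boundary within the child
 * image, spans one full object, and is clipped at the parent
 * overlap, beyond which no parent data is defined. Hypothetical
 * helper; the caller guarantees img_offset < overlap.
 */
static u64 parent_read_length(u64 img_offset, u8 obj_order, u64 overlap)
{
	u64 length = 1ULL << obj_order;		/* one full object */

	if (img_offset + length > overlap)
		length = overlap - img_offset;	/* clip at the overlap */
	return length;
}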
2829 static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request)
2831 struct rbd_obj_request *orig_request;
2832 struct rbd_device *rbd_dev;
2835 rbd_assert(!obj_request_img_data_test(obj_request));
2838 * All we need from the object request is the original
2839 * request and the result of the STAT op. Grab those, then
2840 * we're done with the request.
2842 orig_request = obj_request->obj_request;
2843 obj_request->obj_request = NULL;
2844 rbd_obj_request_put(orig_request);
2845 rbd_assert(orig_request);
2846 rbd_assert(orig_request->img_request);
2848 result = obj_request->result;
2849 obj_request->result = 0;
2851 dout("%s: obj %p for obj %p result %d %llu/%llu\n", __func__,
2852 obj_request, orig_request, result,
2853 obj_request->xferred, obj_request->length);
2854 rbd_obj_request_put(obj_request);
2857 * If the overlap has become 0 (most likely because the
2858  * image has been flattened) we need to re-submit the original request.
2861 rbd_dev = orig_request->img_request->rbd_dev;
2862 if (!rbd_dev->parent_overlap) {
2863 rbd_obj_request_submit(orig_request);
2868 * Our only purpose here is to determine whether the object
2869 * exists, and we don't want to treat the non-existence as
2870 * an error. If something else comes back, transfer the
2871 * error to the original request and complete it now.
2874 obj_request_existence_set(orig_request, true);
2875 } else if (result == -ENOENT) {
2876 obj_request_existence_set(orig_request, false);
2877 } else if (result) {
2878 orig_request->result = result;
2883 * Resubmit the original request now that we have recorded
2884 * whether the target object exists.
2886 orig_request->result = rbd_img_obj_request_submit(orig_request);
2888 if (orig_request->result)
2889 rbd_obj_request_complete(orig_request);
2892 static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request)
2894 struct rbd_obj_request *stat_request;
2895 struct rbd_device *rbd_dev;
2896 struct page **pages = NULL;
2902  * The response data for a STAT call consists of an le64 object length
 * followed by an mtime (le32 seconds, le32 nanoseconds), which is what
 * the size computed below accounts for.
2909 size = sizeof (__le64) + sizeof (__le32) + sizeof (__le32);
2910 page_count = (u32)calc_pages_for(0, size);
2911 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2913 return PTR_ERR(pages);
2916 stat_request = rbd_obj_request_create(obj_request->object_name, 0, 0,
2921 rbd_obj_request_get(obj_request);
2922 stat_request->obj_request = obj_request;
2923 stat_request->pages = pages;
2924 stat_request->page_count = page_count;
2926 rbd_assert(obj_request->img_request);
2927 rbd_dev = obj_request->img_request->rbd_dev;
2928 stat_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
2930 if (!stat_request->osd_req)
2932 stat_request->callback = rbd_img_obj_exists_callback;
2934 osd_req_op_init(stat_request->osd_req, 0, CEPH_OSD_OP_STAT, 0);
2935 osd_req_op_raw_data_in_pages(stat_request->osd_req, 0, pages, size, 0,
2937 rbd_osd_req_format_read(stat_request);
2939 rbd_obj_request_submit(stat_request);
2944 rbd_obj_request_put(obj_request);
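/*
 * Illustrative sketch (not part of this driver): the wire layout of
 * the STAT reply, which is what the 8 + 4 + 4 byte size computed
 * above accounts for. The struct name is hypothetical.
 */
struct stat_reply {
	__le64 length;		/* object size in bytes */
	__le32 tv_sec;		/* mtime seconds */
	__le32 tv_nsec;		/* mtime nanoseconds */
} __attribute__ ((packed));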
2949 static bool img_obj_request_simple(struct rbd_obj_request *obj_request)
2951 struct rbd_img_request *img_request;
2952 struct rbd_device *rbd_dev;
2954 rbd_assert(obj_request_img_data_test(obj_request));
2956 img_request = obj_request->img_request;
2957 rbd_assert(img_request);
2958 rbd_dev = img_request->rbd_dev;
2961 if (!img_request_write_test(img_request) &&
2962 !img_request_discard_test(img_request))
2965 /* Non-layered writes */
2966 if (!img_request_layered_test(img_request))
2970 * Layered writes outside of the parent overlap range don't
2971 * share any data with the parent.
2973 if (!obj_request_overlaps_parent(obj_request))
2977 * Entire-object layered writes - we will overwrite whatever
2978 * parent data there is anyway.
2980 if (!obj_request->offset &&
2981 obj_request->length == rbd_obj_bytes(&rbd_dev->header))
2985 * If the object is known to already exist, its parent data has
2986 * already been copied.
2988 if (obj_request_known_test(obj_request) &&
2989 obj_request_exists_test(obj_request))
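/*
 * Illustrative sketch (not part of this driver): the decision above
 * as one expression. Reads, non-layered writes, writes beyond the
 * overlap, whole-object writes and writes to objects known to exist
 * all go straight to the OSD; only the remainder needs the
 * existence-check/copyup path. Parameter names are hypothetical.
 */
static bool needs_copyup_path(bool write_or_discard, bool layered,
			      bool overlaps_parent, bool whole_object,
			      bool known_to_exist)
{
	return write_or_discard && layered && overlaps_parent &&
	       !whole_object && !known_to_exist;
}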
2995 static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request)
2997 if (img_obj_request_simple(obj_request)) {
2998 rbd_obj_request_submit(obj_request);
3003 * It's a layered write. The target object might exist but
3004 * we may not know that yet. If we know it doesn't exist,
3005 * start by reading the data for the full target object from
3006 * the parent so we can use it for a copyup to the target.
3008 if (obj_request_known_test(obj_request))
3009 return rbd_img_obj_parent_read_full(obj_request);
3011 /* We don't know whether the target exists. Go find out. */
3013 return rbd_img_obj_exists_submit(obj_request);
3016 static int rbd_img_request_submit(struct rbd_img_request *img_request)
3018 struct rbd_obj_request *obj_request;
3019 struct rbd_obj_request *next_obj_request;
3022 dout("%s: img %p\n", __func__, img_request);
3024 rbd_img_request_get(img_request);
3025 for_each_obj_request_safe(img_request, obj_request, next_obj_request) {
3026 ret = rbd_img_obj_request_submit(obj_request);
3032 rbd_img_request_put(img_request);
3036 static void rbd_img_parent_read_callback(struct rbd_img_request *img_request)
3038 struct rbd_obj_request *obj_request;
3039 struct rbd_device *rbd_dev;
3044 rbd_assert(img_request_child_test(img_request));
3046 /* First get what we need from the image request and release it */
3048 obj_request = img_request->obj_request;
3049 img_xferred = img_request->xferred;
3050 img_result = img_request->result;
3051 rbd_img_request_put(img_request);
3054 * If the overlap has become 0 (most likely because the
3055  * image has been flattened) we need to re-submit the original request.
3058 rbd_assert(obj_request);
3059 rbd_assert(obj_request->img_request);
3060 rbd_dev = obj_request->img_request->rbd_dev;
3061 if (!rbd_dev->parent_overlap) {
3062 rbd_obj_request_submit(obj_request);
3066 obj_request->result = img_result;
3067 if (obj_request->result)
3071 * We need to zero anything beyond the parent overlap
3072 * boundary. Since rbd_img_obj_request_read_callback()
3073 * will zero anything beyond the end of a short read, an
3074 * easy way to do this is to pretend the data from the
3075 * parent came up short--ending at the overlap boundary.
3077 rbd_assert(obj_request->img_offset < U64_MAX - obj_request->length);
3078 obj_end = obj_request->img_offset + obj_request->length;
3079 if (obj_end > rbd_dev->parent_overlap) {
3082 if (obj_request->img_offset < rbd_dev->parent_overlap)
3083 xferred = rbd_dev->parent_overlap -
3084 obj_request->img_offset;
3086 obj_request->xferred = min(img_xferred, xferred);
3088 obj_request->xferred = img_xferred;
3091 rbd_img_obj_request_read_callback(obj_request);
3092 rbd_obj_request_complete(obj_request);
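/*
 * Illustrative sketch (not part of this driver): the clamping done
 * above, which makes a parent read look short at the overlap
 * boundary so rbd_img_obj_request_read_callback() zero-fills the
 * rest. Hypothetical helper.
 */
static u64 clamp_parent_xferred(u64 img_offset, u64 img_xferred, u64 overlap)
{
	if (img_offset >= overlap)
		return 0;		/* entirely beyond the overlap */
	return min(img_xferred, overlap - img_offset);
}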
3095 static void rbd_img_parent_read(struct rbd_obj_request *obj_request)
3097 struct rbd_img_request *img_request;
3100 rbd_assert(obj_request_img_data_test(obj_request));
3101 rbd_assert(obj_request->img_request != NULL);
3102 rbd_assert(obj_request->result == (s32) -ENOENT);
3103 rbd_assert(obj_request_type_valid(obj_request->type));
3105 /* rbd_read_finish(obj_request, obj_request->length); */
3106 img_request = rbd_parent_request_create(obj_request,
3107 obj_request->img_offset,
3108 obj_request->length);
3113 if (obj_request->type == OBJ_REQUEST_BIO)
3114 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
3115 obj_request->bio_list);
3117 result = rbd_img_request_fill(img_request, OBJ_REQUEST_PAGES,
3118 obj_request->pages);
3122 img_request->callback = rbd_img_parent_read_callback;
3123 result = rbd_img_request_submit(img_request);
3130 rbd_img_request_put(img_request);
3131 obj_request->result = result;
3132 obj_request->xferred = 0;
3133 obj_request_done_set(obj_request);
3136 static const struct rbd_client_id rbd_empty_cid;
3138 static bool rbd_cid_equal(const struct rbd_client_id *lhs,
3139 const struct rbd_client_id *rhs)
3141 return lhs->gid == rhs->gid && lhs->handle == rhs->handle;
3144 static struct rbd_client_id rbd_get_cid(struct rbd_device *rbd_dev)
3146 struct rbd_client_id cid;
3148 mutex_lock(&rbd_dev->watch_mutex);
3149 cid.gid = ceph_client_gid(rbd_dev->rbd_client->client);
3150 cid.handle = rbd_dev->watch_cookie;
3151 mutex_unlock(&rbd_dev->watch_mutex);
3156 * lock_rwsem must be held for write
3158 static void rbd_set_owner_cid(struct rbd_device *rbd_dev,
3159 const struct rbd_client_id *cid)
3161 dout("%s rbd_dev %p %llu-%llu -> %llu-%llu\n", __func__, rbd_dev,
3162 rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle,
3163 cid->gid, cid->handle);
3164 rbd_dev->owner_cid = *cid; /* struct */
3167 static void format_lock_cookie(struct rbd_device *rbd_dev, char *buf)
3169 mutex_lock(&rbd_dev->watch_mutex);
3170 sprintf(buf, "%s %llu", RBD_LOCK_COOKIE_PREFIX, rbd_dev->watch_cookie);
3171 mutex_unlock(&rbd_dev->watch_mutex);
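/*
 * Illustrative sketch (not part of this driver): the lock cookie is
 * just the fixed prefix plus the watch cookie, so a peer can recover
 * the watch id with the matching sscanf() that find_watcher() uses
 * below. Buffer size and value here are hypothetical.
 */
static void lock_cookie_roundtrip(void)
{
	char cookie[32];
	u64 parsed = 0;

	sprintf(cookie, "%s %llu", RBD_LOCK_COOKIE_PREFIX, 123ULL);
	sscanf(cookie, RBD_LOCK_COOKIE_PREFIX " %llu", &parsed);
	/* parsed == 123: the peer knows which watch holds the lock */
}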
3175 * lock_rwsem must be held for write
3177 static int rbd_lock(struct rbd_device *rbd_dev)
3179 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3180 struct rbd_client_id cid = rbd_get_cid(rbd_dev);
3184 WARN_ON(__rbd_is_lock_owner(rbd_dev));
3186 format_lock_cookie(rbd_dev, cookie);
3187 ret = ceph_cls_lock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
3188 RBD_LOCK_NAME, CEPH_CLS_LOCK_EXCLUSIVE, cookie,
3189 RBD_LOCK_TAG, "", 0);
3193 rbd_dev->lock_state = RBD_LOCK_STATE_LOCKED;
3194 rbd_set_owner_cid(rbd_dev, &cid);
3195 queue_work(rbd_dev->task_wq, &rbd_dev->acquired_lock_work);
3200 * lock_rwsem must be held for write
3202 static int rbd_unlock(struct rbd_device *rbd_dev)
3204 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3208 WARN_ON(!__rbd_is_lock_owner(rbd_dev));
3210 rbd_dev->lock_state = RBD_LOCK_STATE_UNLOCKED;
3212 format_lock_cookie(rbd_dev, cookie);
3213 ret = ceph_cls_unlock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
3214 RBD_LOCK_NAME, cookie);
3215 if (ret && ret != -ENOENT) {
3216 rbd_warn(rbd_dev, "cls_unlock failed: %d", ret);
3220 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
3221 queue_work(rbd_dev->task_wq, &rbd_dev->released_lock_work);
3225 static int __rbd_notify_op_lock(struct rbd_device *rbd_dev,
3226 enum rbd_notify_op notify_op,
3227 struct page ***preply_pages,
3230 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3231 struct rbd_client_id cid = rbd_get_cid(rbd_dev);
3232 int buf_size = 4 + 8 + 8 + CEPH_ENCODING_START_BLK_LEN;
3236 dout("%s rbd_dev %p notify_op %d\n", __func__, rbd_dev, notify_op);
3238 /* encode *LockPayload NotifyMessage (op + ClientId) */
3239 ceph_start_encoding(&p, 2, 1, buf_size - CEPH_ENCODING_START_BLK_LEN);
3240 ceph_encode_32(&p, notify_op);
3241 ceph_encode_64(&p, cid.gid);
3242 ceph_encode_64(&p, cid.handle);
3244 return ceph_osdc_notify(osdc, &rbd_dev->header_oid,
3245 &rbd_dev->header_oloc, buf, buf_size,
3246 RBD_NOTIFY_TIMEOUT, preply_pages, preply_len);
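/*
 * Illustrative sketch (not part of this driver): the NotifyMessage
 * payload assembled above, byte for byte. ceph_start_encoding()
 * emits a u8 struct version, a u8 compat version and a le32 payload
 * length (CEPH_ENCODING_START_BLK_LEN bytes in all), followed here
 * by the op and the two halves of the client id, which is exactly
 * the 4 + 8 + 8 + CEPH_ENCODING_START_BLK_LEN buf_size above. The
 * struct name is hypothetical.
 */
struct notify_payload {
	u8     struct_v;	/* 2 */
	u8     struct_compat;	/* 1 */
	__le32 struct_len;	/* 20: the three fields below */
	__le32 notify_op;
	__le64 gid;
	__le64 handle;
} __attribute__ ((packed));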
3249 static void rbd_notify_op_lock(struct rbd_device *rbd_dev,
3250 enum rbd_notify_op notify_op)
3252 struct page **reply_pages;
3255 __rbd_notify_op_lock(rbd_dev, notify_op, &reply_pages, &reply_len);
3256 ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len));
3259 static void rbd_notify_acquired_lock(struct work_struct *work)
3261 struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
3262 acquired_lock_work);
3264 rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_ACQUIRED_LOCK);
3267 static void rbd_notify_released_lock(struct work_struct *work)
3269 struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
3270 released_lock_work);
3272 rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_RELEASED_LOCK);
3275 static int rbd_request_lock(struct rbd_device *rbd_dev)
3277 struct page **reply_pages;
3279 bool lock_owner_responded = false;
3282 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3284 ret = __rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_REQUEST_LOCK,
3285 &reply_pages, &reply_len);
3286 if (ret && ret != -ETIMEDOUT) {
3287 rbd_warn(rbd_dev, "failed to request lock: %d", ret);
3291 if (reply_len > 0 && reply_len <= PAGE_SIZE) {
3292 void *p = page_address(reply_pages[0]);
3293 void *const end = p + reply_len;
3296 ceph_decode_32_safe(&p, end, n, e_inval); /* num_acks */
3301 ceph_decode_need(&p, end, 8 + 8, e_inval);
3302 p += 8 + 8; /* skip gid and cookie */
3304 ceph_decode_32_safe(&p, end, len, e_inval);
3308 if (lock_owner_responded) {
3310 "duplicate lock owners detected");
3315 lock_owner_responded = true;
3316 ret = ceph_start_decoding(&p, end, 1, "ResponseMessage",
3320 "failed to decode ResponseMessage: %d",
3325 ret = ceph_decode_32(&p);
3329 if (!lock_owner_responded) {
3330 rbd_warn(rbd_dev, "no lock owners detected");
3335 ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len));
3343 static void wake_requests(struct rbd_device *rbd_dev, bool wake_all)
3345 dout("%s rbd_dev %p wake_all %d\n", __func__, rbd_dev, wake_all);
3347 cancel_delayed_work(&rbd_dev->lock_dwork);
3349 wake_up_all(&rbd_dev->lock_waitq);
3351 wake_up(&rbd_dev->lock_waitq);
3354 static int get_lock_owner_info(struct rbd_device *rbd_dev,
3355 struct ceph_locker **lockers, u32 *num_lockers)
3357 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3362 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3364 ret = ceph_cls_lock_info(osdc, &rbd_dev->header_oid,
3365 &rbd_dev->header_oloc, RBD_LOCK_NAME,
3366 &lock_type, &lock_tag, lockers, num_lockers);
3370 if (*num_lockers == 0) {
3371 dout("%s rbd_dev %p no lockers detected\n", __func__, rbd_dev);
3375 if (strcmp(lock_tag, RBD_LOCK_TAG)) {
3376 rbd_warn(rbd_dev, "locked by external mechanism, tag %s",
3382 if (lock_type == CEPH_CLS_LOCK_SHARED) {
3383 rbd_warn(rbd_dev, "shared lock type detected");
3388 if (strncmp((*lockers)[0].id.cookie, RBD_LOCK_COOKIE_PREFIX,
3389 strlen(RBD_LOCK_COOKIE_PREFIX))) {
3390 rbd_warn(rbd_dev, "locked by external mechanism, cookie %s",
3391 (*lockers)[0].id.cookie);
3401 static int find_watcher(struct rbd_device *rbd_dev,
3402 const struct ceph_locker *locker)
3404 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3405 struct ceph_watch_item *watchers;
3411 ret = ceph_osdc_list_watchers(osdc, &rbd_dev->header_oid,
3412 &rbd_dev->header_oloc, &watchers,
3417 sscanf(locker->id.cookie, RBD_LOCK_COOKIE_PREFIX " %llu", &cookie);
3418 for (i = 0; i < num_watchers; i++) {
3419 if (!memcmp(&watchers[i].addr, &locker->info.addr,
3420 sizeof(locker->info.addr)) &&
3421 watchers[i].cookie == cookie) {
3422 struct rbd_client_id cid = {
3423 .gid = le64_to_cpu(watchers[i].name.num),
3427 dout("%s rbd_dev %p found cid %llu-%llu\n", __func__,
3428 rbd_dev, cid.gid, cid.handle);
3429 rbd_set_owner_cid(rbd_dev, &cid);
3435 dout("%s rbd_dev %p no watchers\n", __func__, rbd_dev);
3443 * lock_rwsem must be held for write
3445 static int rbd_try_lock(struct rbd_device *rbd_dev)
3447 struct ceph_client *client = rbd_dev->rbd_client->client;
3448 struct ceph_locker *lockers;
3453 ret = rbd_lock(rbd_dev);
3457 /* determine if the current lock holder is still alive */
3458 ret = get_lock_owner_info(rbd_dev, &lockers, &num_lockers);
3462 if (num_lockers == 0)
3465 ret = find_watcher(rbd_dev, lockers);
3468 ret = 0; /* have to request lock */
3472 rbd_warn(rbd_dev, "%s%llu seems dead, breaking lock",
3473 ENTITY_NAME(lockers[0].id.name));
3475 ret = ceph_monc_blacklist_add(&client->monc,
3476 &lockers[0].info.addr);
3478 rbd_warn(rbd_dev, "blacklist of %s%llu failed: %d",
3479 ENTITY_NAME(lockers[0].id.name), ret);
3483 ret = ceph_cls_break_lock(&client->osdc, &rbd_dev->header_oid,
3484 &rbd_dev->header_oloc, RBD_LOCK_NAME,
3485 lockers[0].id.cookie,
3486 &lockers[0].id.name);
3487 if (ret && ret != -ENOENT)
3491 ceph_free_lockers(lockers, num_lockers);
3495 ceph_free_lockers(lockers, num_lockers);
3500 * ret is set only if lock_state is RBD_LOCK_STATE_UNLOCKED
3502 static enum rbd_lock_state rbd_try_acquire_lock(struct rbd_device *rbd_dev,
3505 enum rbd_lock_state lock_state;
3507 down_read(&rbd_dev->lock_rwsem);
3508 dout("%s rbd_dev %p read lock_state %d\n", __func__, rbd_dev,
3509 rbd_dev->lock_state);
3510 if (__rbd_is_lock_owner(rbd_dev)) {
3511 lock_state = rbd_dev->lock_state;
3512 up_read(&rbd_dev->lock_rwsem);
3516 up_read(&rbd_dev->lock_rwsem);
3517 down_write(&rbd_dev->lock_rwsem);
3518 dout("%s rbd_dev %p write lock_state %d\n", __func__, rbd_dev,
3519 rbd_dev->lock_state);
3520 if (!__rbd_is_lock_owner(rbd_dev)) {
3521 *pret = rbd_try_lock(rbd_dev);
3523 rbd_warn(rbd_dev, "failed to acquire lock: %d", *pret);
3526 lock_state = rbd_dev->lock_state;
3527 up_write(&rbd_dev->lock_rwsem);
3531 static void rbd_acquire_lock(struct work_struct *work)
3533 struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
3534 struct rbd_device, lock_dwork);
3535 enum rbd_lock_state lock_state;
3538 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3540 lock_state = rbd_try_acquire_lock(rbd_dev, &ret);
3541 if (lock_state != RBD_LOCK_STATE_UNLOCKED || ret == -EBLACKLISTED) {
3542 if (lock_state == RBD_LOCK_STATE_LOCKED)
3543 wake_requests(rbd_dev, true);
3544 dout("%s rbd_dev %p lock_state %d ret %d - done\n", __func__,
3545 rbd_dev, lock_state, ret);
3549 ret = rbd_request_lock(rbd_dev);
3550 if (ret == -ETIMEDOUT) {
3551 goto again; /* treat this as a dead client */
3552 } else if (ret < 0) {
3553 rbd_warn(rbd_dev, "error requesting lock: %d", ret);
3554 mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork,
3558  * lock owner acked, but resend if we don't see them release the lock
3561 dout("%s rbd_dev %p requeueing lock_dwork\n", __func__,
3563 mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork,
3564 msecs_to_jiffies(2 * RBD_NOTIFY_TIMEOUT * MSEC_PER_SEC));
3569 * lock_rwsem must be held for write
3571 static bool rbd_release_lock(struct rbd_device *rbd_dev)
3573 dout("%s rbd_dev %p read lock_state %d\n", __func__, rbd_dev,
3574 rbd_dev->lock_state);
3575 if (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED)
3578 rbd_dev->lock_state = RBD_LOCK_STATE_RELEASING;
3579 downgrade_write(&rbd_dev->lock_rwsem);
3581 * Ensure that all in-flight IO is flushed.
3583 * FIXME: ceph_osdc_sync() flushes the entire OSD client, which
3584 * may be shared with other devices.
3586 ceph_osdc_sync(&rbd_dev->rbd_client->client->osdc);
3587 up_read(&rbd_dev->lock_rwsem);
3589 down_write(&rbd_dev->lock_rwsem);
3590 dout("%s rbd_dev %p write lock_state %d\n", __func__, rbd_dev,
3591 rbd_dev->lock_state);
3592 if (rbd_dev->lock_state != RBD_LOCK_STATE_RELEASING)
3595 if (!rbd_unlock(rbd_dev))
3597 * Give others a chance to grab the lock - we would re-acquire
3598 * almost immediately if we got new IO during ceph_osdc_sync()
3599 * otherwise. We need to ack our own notifications, so this
3600 * lock_dwork will be requeued from rbd_wait_state_locked()
3601 * after wake_requests() in rbd_handle_released_lock().
3603 cancel_delayed_work(&rbd_dev->lock_dwork);
3608 static void rbd_release_lock_work(struct work_struct *work)
3610 struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
3613 down_write(&rbd_dev->lock_rwsem);
3614 rbd_release_lock(rbd_dev);
3615 up_write(&rbd_dev->lock_rwsem);
3618 static void rbd_handle_acquired_lock(struct rbd_device *rbd_dev, u8 struct_v,
3621 struct rbd_client_id cid = { 0 };
3623 if (struct_v >= 2) {
3624 cid.gid = ceph_decode_64(p);
3625 cid.handle = ceph_decode_64(p);
3628 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
3630 if (!rbd_cid_equal(&cid, &rbd_empty_cid)) {
3631 down_write(&rbd_dev->lock_rwsem);
3632 if (rbd_cid_equal(&cid, &rbd_dev->owner_cid)) {
3634  * we already know that the remote client is the owner
3637 up_write(&rbd_dev->lock_rwsem);
3641 rbd_set_owner_cid(rbd_dev, &cid);
3642 downgrade_write(&rbd_dev->lock_rwsem);
3644 down_read(&rbd_dev->lock_rwsem);
3647 if (!__rbd_is_lock_owner(rbd_dev))
3648 wake_requests(rbd_dev, false);
3649 up_read(&rbd_dev->lock_rwsem);
3652 static void rbd_handle_released_lock(struct rbd_device *rbd_dev, u8 struct_v,
3655 struct rbd_client_id cid = { 0 };
3657 if (struct_v >= 2) {
3658 cid.gid = ceph_decode_64(p);
3659 cid.handle = ceph_decode_64(p);
3662 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
3664 if (!rbd_cid_equal(&cid, &rbd_empty_cid)) {
3665 down_write(&rbd_dev->lock_rwsem);
3666 if (!rbd_cid_equal(&cid, &rbd_dev->owner_cid)) {
3667 dout("%s rbd_dev %p unexpected owner, cid %llu-%llu != owner_cid %llu-%llu\n",
3668 __func__, rbd_dev, cid.gid, cid.handle,
3669 rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle);
3670 up_write(&rbd_dev->lock_rwsem);
3674 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
3675 downgrade_write(&rbd_dev->lock_rwsem);
3677 down_read(&rbd_dev->lock_rwsem);
3680 if (!__rbd_is_lock_owner(rbd_dev))
3681 wake_requests(rbd_dev, false);
3682 up_read(&rbd_dev->lock_rwsem);
3685 static bool rbd_handle_request_lock(struct rbd_device *rbd_dev, u8 struct_v,
3688 struct rbd_client_id my_cid = rbd_get_cid(rbd_dev);
3689 struct rbd_client_id cid = { 0 };
3692 if (struct_v >= 2) {
3693 cid.gid = ceph_decode_64(p);
3694 cid.handle = ceph_decode_64(p);
3697 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
3699 if (rbd_cid_equal(&cid, &my_cid))
3702 down_read(&rbd_dev->lock_rwsem);
3703 need_to_send = __rbd_is_lock_owner(rbd_dev);
3704 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED) {
3705 if (!rbd_cid_equal(&rbd_dev->owner_cid, &rbd_empty_cid)) {
3706 dout("%s rbd_dev %p queueing unlock_work\n", __func__,
3708 queue_work(rbd_dev->task_wq, &rbd_dev->unlock_work);
3711 up_read(&rbd_dev->lock_rwsem);
3712 return need_to_send;
3715 static void __rbd_acknowledge_notify(struct rbd_device *rbd_dev,
3716 u64 notify_id, u64 cookie, s32 *result)
3718 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3719 int buf_size = 4 + CEPH_ENCODING_START_BLK_LEN;
3726 /* encode ResponseMessage */
3727 ceph_start_encoding(&p, 1, 1,
3728 buf_size - CEPH_ENCODING_START_BLK_LEN);
3729 ceph_encode_32(&p, *result);
3734 ret = ceph_osdc_notify_ack(osdc, &rbd_dev->header_oid,
3735 &rbd_dev->header_oloc, notify_id, cookie,
3738 rbd_warn(rbd_dev, "acknowledge_notify failed: %d", ret);
3741 static void rbd_acknowledge_notify(struct rbd_device *rbd_dev, u64 notify_id,
3744 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3745 __rbd_acknowledge_notify(rbd_dev, notify_id, cookie, NULL);
3748 static void rbd_acknowledge_notify_result(struct rbd_device *rbd_dev,
3749 u64 notify_id, u64 cookie, s32 result)
3751 dout("%s rbd_dev %p result %d\n", __func__, rbd_dev, result);
3752 __rbd_acknowledge_notify(rbd_dev, notify_id, cookie, &result);
3755 static void rbd_watch_cb(void *arg, u64 notify_id, u64 cookie,
3756 u64 notifier_id, void *data, size_t data_len)
3758 struct rbd_device *rbd_dev = arg;
3760 void *const end = p + data_len;
3766 dout("%s rbd_dev %p cookie %llu notify_id %llu data_len %zu\n",
3767 __func__, rbd_dev, cookie, notify_id, data_len);
3769 ret = ceph_start_decoding(&p, end, 1, "NotifyMessage",
3772 rbd_warn(rbd_dev, "failed to decode NotifyMessage: %d",
3777 notify_op = ceph_decode_32(&p);
3779 /* legacy notification for header updates */
3780 notify_op = RBD_NOTIFY_OP_HEADER_UPDATE;
3784 dout("%s rbd_dev %p notify_op %u\n", __func__, rbd_dev, notify_op);
3785 switch (notify_op) {
3786 case RBD_NOTIFY_OP_ACQUIRED_LOCK:
3787 rbd_handle_acquired_lock(rbd_dev, struct_v, &p);
3788 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
3790 case RBD_NOTIFY_OP_RELEASED_LOCK:
3791 rbd_handle_released_lock(rbd_dev, struct_v, &p);
3792 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
3794 case RBD_NOTIFY_OP_REQUEST_LOCK:
3795 if (rbd_handle_request_lock(rbd_dev, struct_v, &p))
3797 * send ResponseMessage(0) back so the client
3798 * can detect a missing owner
3800 rbd_acknowledge_notify_result(rbd_dev, notify_id,
3803 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
3805 case RBD_NOTIFY_OP_HEADER_UPDATE:
3806 ret = rbd_dev_refresh(rbd_dev);
3808 rbd_warn(rbd_dev, "refresh failed: %d", ret);
3810 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
3813 if (rbd_is_lock_owner(rbd_dev))
3814 rbd_acknowledge_notify_result(rbd_dev, notify_id,
3815 cookie, -EOPNOTSUPP);
3817 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
3822 static void __rbd_unregister_watch(struct rbd_device *rbd_dev);
3824 static void rbd_watch_errcb(void *arg, u64 cookie, int err)
3826 struct rbd_device *rbd_dev = arg;
3828 rbd_warn(rbd_dev, "encountered watch error: %d", err);
3830 down_write(&rbd_dev->lock_rwsem);
3831 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
3832 up_write(&rbd_dev->lock_rwsem);
3834 mutex_lock(&rbd_dev->watch_mutex);
3835 if (rbd_dev->watch_state == RBD_WATCH_STATE_REGISTERED) {
3836 __rbd_unregister_watch(rbd_dev);
3837 rbd_dev->watch_state = RBD_WATCH_STATE_ERROR;
3839 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->watch_dwork, 0);
3841 mutex_unlock(&rbd_dev->watch_mutex);
3845 * watch_mutex must be locked
3847 static int __rbd_register_watch(struct rbd_device *rbd_dev)
3849 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3850 struct ceph_osd_linger_request *handle;
3852 rbd_assert(!rbd_dev->watch_handle);
3853 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3855 handle = ceph_osdc_watch(osdc, &rbd_dev->header_oid,
3856 &rbd_dev->header_oloc, rbd_watch_cb,
3857 rbd_watch_errcb, rbd_dev);
3859 return PTR_ERR(handle);
3861 rbd_dev->watch_handle = handle;
3866 * watch_mutex must be locked
3868 static void __rbd_unregister_watch(struct rbd_device *rbd_dev)
3870 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3873 rbd_assert(rbd_dev->watch_handle);
3874 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3876 ret = ceph_osdc_unwatch(osdc, rbd_dev->watch_handle);
3878 rbd_warn(rbd_dev, "failed to unwatch: %d", ret);
3880 rbd_dev->watch_handle = NULL;
3883 static int rbd_register_watch(struct rbd_device *rbd_dev)
3887 mutex_lock(&rbd_dev->watch_mutex);
3888 rbd_assert(rbd_dev->watch_state == RBD_WATCH_STATE_UNREGISTERED);
3889 ret = __rbd_register_watch(rbd_dev);
3893 rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED;
3894 rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id;
3897 mutex_unlock(&rbd_dev->watch_mutex);
3901 static void cancel_tasks_sync(struct rbd_device *rbd_dev)
3903 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3905 cancel_delayed_work_sync(&rbd_dev->watch_dwork);
3906 cancel_work_sync(&rbd_dev->acquired_lock_work);
3907 cancel_work_sync(&rbd_dev->released_lock_work);
3908 cancel_delayed_work_sync(&rbd_dev->lock_dwork);
3909 cancel_work_sync(&rbd_dev->unlock_work);
3912 static void rbd_unregister_watch(struct rbd_device *rbd_dev)
3914 WARN_ON(waitqueue_active(&rbd_dev->lock_waitq));
3915 cancel_tasks_sync(rbd_dev);
3917 mutex_lock(&rbd_dev->watch_mutex);
3918 if (rbd_dev->watch_state == RBD_WATCH_STATE_REGISTERED)
3919 __rbd_unregister_watch(rbd_dev);
3920 rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED;
3921 mutex_unlock(&rbd_dev->watch_mutex);
3923 ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
3926 static void rbd_reregister_watch(struct work_struct *work)
3928 struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
3929 struct rbd_device, watch_dwork);
3930 bool was_lock_owner = false;
3933 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3935 down_write(&rbd_dev->lock_rwsem);
3936 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED)
3937 was_lock_owner = rbd_release_lock(rbd_dev);
3939 mutex_lock(&rbd_dev->watch_mutex);
3940 if (rbd_dev->watch_state != RBD_WATCH_STATE_ERROR)
3943 ret = __rbd_register_watch(rbd_dev);
3945 rbd_warn(rbd_dev, "failed to reregister watch: %d", ret);
3946 if (ret != -EBLACKLISTED)
3947 queue_delayed_work(rbd_dev->task_wq,
3948 &rbd_dev->watch_dwork,
3953 rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED;
3954 rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id;
3955 mutex_unlock(&rbd_dev->watch_mutex);
3957 ret = rbd_dev_refresh(rbd_dev);
3959 rbd_warn(rbd_dev, "reregistration refresh failed: %d", ret);
3961 if (was_lock_owner) {
3962 ret = rbd_try_lock(rbd_dev);
3964 rbd_warn(rbd_dev, "reregistration lock failed: %d",
3968 up_write(&rbd_dev->lock_rwsem);
3969 wake_requests(rbd_dev, true);
3973 mutex_unlock(&rbd_dev->watch_mutex);
3974 up_write(&rbd_dev->lock_rwsem);
3978 * Synchronous osd object method call. Returns the number of bytes
3979  * returned in the inbound buffer, or a negative error code.
3981 static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
3982 const char *object_name,
3983 const char *class_name,
3984 const char *method_name,
3985 const void *outbound,
3986 size_t outbound_size,
3988 size_t inbound_size)
3990 struct rbd_obj_request *obj_request;
3991 struct page **pages;
3996 * Method calls are ultimately read operations. The result
3997  * should be placed into the inbound buffer provided. They
3998  * also supply outbound data--parameters for the object
3999  * method. Currently if this is present it will be a snapshot id.
4002 page_count = (u32)calc_pages_for(0, inbound_size);
4003 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
4005 return PTR_ERR(pages);
4008 obj_request = rbd_obj_request_create(object_name, 0, inbound_size,
4013 obj_request->pages = pages;
4014 obj_request->page_count = page_count;
4016 obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
4018 if (!obj_request->osd_req)
4021 osd_req_op_cls_init(obj_request->osd_req, 0, CEPH_OSD_OP_CALL,
4022 class_name, method_name);
4023 if (outbound_size) {
4024 struct ceph_pagelist *pagelist;
4026 pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
4030 ceph_pagelist_init(pagelist);
4031 ceph_pagelist_append(pagelist, outbound, outbound_size);
4032 osd_req_op_cls_request_data_pagelist(obj_request->osd_req, 0,
4035 osd_req_op_cls_response_data_pages(obj_request->osd_req, 0,
4036 obj_request->pages, inbound_size,
4038 rbd_osd_req_format_read(obj_request);
4040 rbd_obj_request_submit(obj_request);
4041 ret = rbd_obj_request_wait(obj_request);
4045 ret = obj_request->result;
4049 rbd_assert(obj_request->xferred < (u64)INT_MAX);
4050 ret = (int)obj_request->xferred;
4051 ceph_copy_from_page_vector(pages, inbound, 0, obj_request->xferred);
4054 rbd_obj_request_put(obj_request);
4056 ceph_release_page_vector(pages, page_count);
4062 * lock_rwsem must be held for read
4064 static void rbd_wait_state_locked(struct rbd_device *rbd_dev)
4070 * Note the use of mod_delayed_work() in rbd_acquire_lock()
4071 * and cancel_delayed_work() in wake_requests().
4073 dout("%s rbd_dev %p queueing lock_dwork\n", __func__, rbd_dev);
4074 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
4075 prepare_to_wait_exclusive(&rbd_dev->lock_waitq, &wait,
4076 TASK_UNINTERRUPTIBLE);
4077 up_read(&rbd_dev->lock_rwsem);
4079 down_read(&rbd_dev->lock_rwsem);
4080 } while (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED);
4081 finish_wait(&rbd_dev->lock_waitq, &wait);
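/*
 * Illustrative sketch (not part of this driver): the bare shape of
 * the wait loop above. The real function also drops and retakes
 * lock_rwsem around schedule() and requeues lock_dwork; this
 * hypothetical helper shows only the prepare/recheck/schedule cycle
 * that avoids missed wakeups.
 */
static void wait_for(wait_queue_head_t *wq, bool (*cond)(void *), void *arg)
{
	DEFINE_WAIT(wait);

	while (!cond(arg)) {
		prepare_to_wait_exclusive(wq, &wait, TASK_UNINTERRUPTIBLE);
		if (cond(arg))
			break;		/* condition was set after the check */
		schedule();
	}
	finish_wait(wq, &wait);
}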
4084 static void rbd_queue_workfn(struct work_struct *work)
4086 struct request *rq = blk_mq_rq_from_pdu(work);
4087 struct rbd_device *rbd_dev = rq->q->queuedata;
4088 struct rbd_img_request *img_request;
4089 struct ceph_snap_context *snapc = NULL;
4090 u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT;
4091 u64 length = blk_rq_bytes(rq);
4092 enum obj_operation_type op_type;
4094 bool must_be_locked;
4097 if (rq->cmd_type != REQ_TYPE_FS) {
4098 dout("%s: non-fs request type %d\n", __func__,
4099 (int) rq->cmd_type);
4104 if (req_op(rq) == REQ_OP_DISCARD)
4105 op_type = OBJ_OP_DISCARD;
4106 else if (req_op(rq) == REQ_OP_WRITE)
4107 op_type = OBJ_OP_WRITE;
4109 op_type = OBJ_OP_READ;
4111 /* Ignore/skip any zero-length requests */
4114 dout("%s: zero-length request\n", __func__);
4119 /* Only reads are allowed to a read-only device */
4121 if (op_type != OBJ_OP_READ) {
4122 if (rbd_dev->mapping.read_only) {
4126 rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP);
4130 * Quit early if the mapped snapshot no longer exists. It's
4131 * still possible the snapshot will have disappeared by the
4132 * time our request arrives at the osd, but there's no sense in
4133 * sending it if we already know.
4135 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
4136 dout("request for non-existent snapshot");
4137 rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
4142 if (offset && length > U64_MAX - offset + 1) {
4143 rbd_warn(rbd_dev, "bad request range (%llu~%llu)", offset,
4146 goto err_rq; /* Shouldn't happen */
4149 blk_mq_start_request(rq);
4151 down_read(&rbd_dev->header_rwsem);
4152 mapping_size = rbd_dev->mapping.size;
4153 if (op_type != OBJ_OP_READ) {
4154 snapc = rbd_dev->header.snapc;
4155 ceph_get_snap_context(snapc);
4156 must_be_locked = rbd_is_lock_supported(rbd_dev);
4158 must_be_locked = rbd_dev->opts->lock_on_read &&
4159 rbd_is_lock_supported(rbd_dev);
4161 up_read(&rbd_dev->header_rwsem);
4163 if (offset + length > mapping_size) {
4164 rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)", offset,
4165 length, mapping_size);
4170 if (must_be_locked) {
4171 down_read(&rbd_dev->lock_rwsem);
4172 if (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED)
4173 rbd_wait_state_locked(rbd_dev);
4176 img_request = rbd_img_request_create(rbd_dev, offset, length, op_type,
4182 img_request->rq = rq;
4183 snapc = NULL; /* img_request consumes a ref */
4185 if (op_type == OBJ_OP_DISCARD)
4186 result = rbd_img_request_fill(img_request, OBJ_REQUEST_NODATA,
4189 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
4192 goto err_img_request;
4194 result = rbd_img_request_submit(img_request);
4196 goto err_img_request;
4199 up_read(&rbd_dev->lock_rwsem);
4203 rbd_img_request_put(img_request);
4206 up_read(&rbd_dev->lock_rwsem);
4209 rbd_warn(rbd_dev, "%s %llx at %llx result %d",
4210 obj_op_name(op_type), length, offset, result);
4211 ceph_put_snap_context(snapc);
4213 blk_mq_end_request(rq, result);
4216 static int rbd_queue_rq(struct blk_mq_hw_ctx *hctx,
4217 const struct blk_mq_queue_data *bd)
4219 struct request *rq = bd->rq;
4220 struct work_struct *work = blk_mq_rq_to_pdu(rq);
4222 queue_work(rbd_wq, work);
4223 return BLK_MQ_RQ_QUEUE_OK;
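/*
 * Illustrative sketch (not part of this driver): how the work item
 * rides inside the request. cmd_size in the tag set reserves a pdu
 * behind every struct request, and blk_mq_rq_to_pdu() /
 * blk_mq_rq_from_pdu() are simple pointer arithmetic between the two
 * views, so queueing needs no extra allocation. Hypothetical helper.
 */
static void queue_rq_on_workqueue(struct request *rq,
				  struct workqueue_struct *wq)
{
	/* the pdu was sized as sizeof(struct work_struct) and INIT_WORK'ed */
	struct work_struct *work = blk_mq_rq_to_pdu(rq);

	queue_work(wq, work);	/* workfn recovers rq via blk_mq_rq_from_pdu() */
}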
4226 static void rbd_free_disk(struct rbd_device *rbd_dev)
4228 struct gendisk *disk = rbd_dev->disk;
4233 rbd_dev->disk = NULL;
4234 if (disk->flags & GENHD_FL_UP) {
4237 blk_cleanup_queue(disk->queue);
4238 blk_mq_free_tag_set(&rbd_dev->tag_set);
4243 static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
4244 const char *object_name,
4245 u64 offset, u64 length, void *buf)
4248 struct rbd_obj_request *obj_request;
4249 struct page **pages = NULL;
4254 page_count = (u32) calc_pages_for(offset, length);
4255 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
4257 return PTR_ERR(pages);
4260 obj_request = rbd_obj_request_create(object_name, offset, length,
4265 obj_request->pages = pages;
4266 obj_request->page_count = page_count;
4268 obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
4270 if (!obj_request->osd_req)
4273 osd_req_op_extent_init(obj_request->osd_req, 0, CEPH_OSD_OP_READ,
4274 offset, length, 0, 0);
4275 osd_req_op_extent_osd_data_pages(obj_request->osd_req, 0,
4277 obj_request->length,
4278 obj_request->offset & ~PAGE_MASK,
4280 rbd_osd_req_format_read(obj_request);
4282 rbd_obj_request_submit(obj_request);
4283 ret = rbd_obj_request_wait(obj_request);
4287 ret = obj_request->result;
4291 rbd_assert(obj_request->xferred <= (u64) SIZE_MAX);
4292 size = (size_t) obj_request->xferred;
4293 ceph_copy_from_page_vector(pages, buf, 0, size);
4294 rbd_assert(size <= (size_t)INT_MAX);
4298 rbd_obj_request_put(obj_request);
4300 ceph_release_page_vector(pages, page_count);
4306 * Read the complete header for the given rbd device. On successful
4307 * return, the rbd_dev->header field will contain up-to-date
4308 * information about the image.
4310 static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
4312 struct rbd_image_header_ondisk *ondisk = NULL;
4319 * The complete header will include an array of its 64-bit
4320 * snapshot ids, followed by the names of those snapshots as
4321 * a contiguous block of NUL-terminated strings. Note that
4322 * the number of snapshots could change by the time we read
4323 * it in, in which case we re-read it.
4330 size = sizeof (*ondisk);
4331 size += snap_count * sizeof (struct rbd_image_snap_ondisk);
4333 ondisk = kmalloc(size, GFP_KERNEL);
4337 ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_oid.name,
4341 if ((size_t)ret < size) {
4343 rbd_warn(rbd_dev, "short header read (want %zd got %d)",
4347 if (!rbd_dev_ondisk_valid(ondisk)) {
4349 rbd_warn(rbd_dev, "invalid header");
4353 names_size = le64_to_cpu(ondisk->snap_names_len);
4354 want_count = snap_count;
4355 snap_count = le32_to_cpu(ondisk->snap_count);
4356 } while (snap_count != want_count);
4358 ret = rbd_header_from_disk(rbd_dev, ondisk);
4366 * Clear the rbd device's EXISTS flag if the snapshot it's mapped to
4367 * has disappeared from the (just updated) snapshot context.
4369 static void rbd_exists_validate(struct rbd_device *rbd_dev)
4373 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags))
4376 snap_id = rbd_dev->spec->snap_id;
4377 if (snap_id == CEPH_NOSNAP)
4380 if (rbd_dev_snap_index(rbd_dev, snap_id) == BAD_SNAP_INDEX)
4381 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
4384 static void rbd_dev_update_size(struct rbd_device *rbd_dev)
4389 * If EXISTS is not set, rbd_dev->disk may be NULL, so don't
4390 * try to update its size. If REMOVING is set, updating size
4391 * is just useless work since the device can't be opened.
4393 if (test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags) &&
4394 !test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags)) {
4395 size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
4396 dout("setting size to %llu sectors", (unsigned long long)size);
4397 set_capacity(rbd_dev->disk, size);
4398 revalidate_disk(rbd_dev->disk);
4402 static int rbd_dev_refresh(struct rbd_device *rbd_dev)
4407 down_write(&rbd_dev->header_rwsem);
4408 mapping_size = rbd_dev->mapping.size;
4410 ret = rbd_dev_header_info(rbd_dev);
4415 * If there is a parent, see if it has disappeared due to the
4416 * mapped image getting flattened.
4418 if (rbd_dev->parent) {
4419 ret = rbd_dev_v2_parent_info(rbd_dev);
4424 if (rbd_dev->spec->snap_id == CEPH_NOSNAP) {
4425 rbd_dev->mapping.size = rbd_dev->header.image_size;
4427 /* validate mapped snapshot's EXISTS flag */
4428 rbd_exists_validate(rbd_dev);
4432 up_write(&rbd_dev->header_rwsem);
4433 if (!ret && mapping_size != rbd_dev->mapping.size)
4434 rbd_dev_update_size(rbd_dev);
4439 static int rbd_init_request(void *data, struct request *rq,
4440 unsigned int hctx_idx, unsigned int request_idx,
4441 unsigned int numa_node)
4443 struct work_struct *work = blk_mq_rq_to_pdu(rq);
4445 INIT_WORK(work, rbd_queue_workfn);
4449 static struct blk_mq_ops rbd_mq_ops = {
4450 .queue_rq = rbd_queue_rq,
4451 .map_queue = blk_mq_map_queue,
4452 .init_request = rbd_init_request,
4455 static int rbd_init_disk(struct rbd_device *rbd_dev)
4457 struct gendisk *disk;
4458 struct request_queue *q;
4462 /* create gendisk info */
4463 disk = alloc_disk(single_major ?
4464 (1 << RBD_SINGLE_MAJOR_PART_SHIFT) :
4465 RBD_MINORS_PER_MAJOR);
4469 snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
4471 disk->major = rbd_dev->major;
4472 disk->first_minor = rbd_dev->minor;
4474 disk->flags |= GENHD_FL_EXT_DEVT;
4475 disk->fops = &rbd_bd_ops;
4476 disk->private_data = rbd_dev;
4478 memset(&rbd_dev->tag_set, 0, sizeof(rbd_dev->tag_set));
4479 rbd_dev->tag_set.ops = &rbd_mq_ops;
4480 rbd_dev->tag_set.queue_depth = rbd_dev->opts->queue_depth;
4481 rbd_dev->tag_set.numa_node = NUMA_NO_NODE;
4482 rbd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
4483 rbd_dev->tag_set.nr_hw_queues = 1;
4484 rbd_dev->tag_set.cmd_size = sizeof(struct work_struct);
4486 err = blk_mq_alloc_tag_set(&rbd_dev->tag_set);
4490 q = blk_mq_init_queue(&rbd_dev->tag_set);
4496 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
4497 /* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */
4499 /* set io sizes to object size */
4500 segment_size = rbd_obj_bytes(&rbd_dev->header);
4501 blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
4502 q->limits.max_sectors = queue_max_hw_sectors(q);
4503 blk_queue_max_segments(q, segment_size / SECTOR_SIZE);
4504 blk_queue_max_segment_size(q, segment_size);
4505 blk_queue_io_min(q, segment_size);
4506 blk_queue_io_opt(q, segment_size);
4508 /* enable the discard support */
4509 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
4510 q->limits.discard_granularity = segment_size;
4511 q->limits.discard_alignment = segment_size;
4512 blk_queue_max_discard_sectors(q, segment_size / SECTOR_SIZE);
4513 q->limits.discard_zeroes_data = 1;
4515 if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
4516 q->backing_dev_info.capabilities |= BDI_CAP_STABLE_WRITES;
4520 q->queuedata = rbd_dev;
4522 rbd_dev->disk = disk;
4526 blk_mq_free_tag_set(&rbd_dev->tag_set);
4536 static struct rbd_device *dev_to_rbd_dev(struct device *dev)
4538 return container_of(dev, struct rbd_device, dev);
4541 static ssize_t rbd_size_show(struct device *dev,
4542 struct device_attribute *attr, char *buf)
4544 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4546 return sprintf(buf, "%llu\n",
4547 (unsigned long long)rbd_dev->mapping.size);
4551 * Note this shows the features for whatever's mapped, which is not
4552 * necessarily the base image.
4554 static ssize_t rbd_features_show(struct device *dev,
4555 struct device_attribute *attr, char *buf)
4557 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4559 return sprintf(buf, "0x%016llx\n",
4560 (unsigned long long)rbd_dev->mapping.features);
4563 static ssize_t rbd_major_show(struct device *dev,
4564 struct device_attribute *attr, char *buf)
4566 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4569 return sprintf(buf, "%d\n", rbd_dev->major);
4571 return sprintf(buf, "(none)\n");
4574 static ssize_t rbd_minor_show(struct device *dev,
4575 struct device_attribute *attr, char *buf)
4577 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4579 return sprintf(buf, "%d\n", rbd_dev->minor);
static ssize_t rbd_client_addr_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
	struct ceph_entity_addr *client_addr =
	    ceph_client_addr(rbd_dev->rbd_client->client);

	return sprintf(buf, "%pISpc/%u\n", &client_addr->in_addr,
		       le32_to_cpu(client_addr->nonce));
}

static ssize_t rbd_client_id_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "client%lld\n",
		       ceph_client_gid(rbd_dev->rbd_client->client));
}

static ssize_t rbd_cluster_fsid_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%pU\n", &rbd_dev->rbd_client->client->fsid);
}

static ssize_t rbd_config_info_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%s\n", rbd_dev->config_info);
}

static ssize_t rbd_pool_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
}

static ssize_t rbd_pool_id_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%llu\n",
		       (unsigned long long) rbd_dev->spec->pool_id);
}

static ssize_t rbd_name_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	if (rbd_dev->spec->image_name)
		return sprintf(buf, "%s\n", rbd_dev->spec->image_name);

	return sprintf(buf, "(unknown)\n");
}

static ssize_t rbd_image_id_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
}
/*
 * Shows the name of the currently-mapped snapshot (or
 * RBD_SNAP_HEAD_NAME for the base image).
 */
static ssize_t rbd_snap_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
}

static ssize_t rbd_snap_id_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%llu\n", rbd_dev->spec->snap_id);
}
/*
 * For a v2 image, shows the chain of parent images, separated by empty
 * lines.  For v1 images or if there is no parent, shows "(no parent
 * image)".
 */
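/*
 * Hypothetical example of the resulting "parent" attribute for a
 * one-level clone (all values illustrative only):
 *
 *	pool_id 2
 *	pool_name rbd
 *	image_id 1018e746frtd
 *	image_name base-image
 *	snap_id 4
 *	snap_name gold
 *	overlap 10737418240
 */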
static ssize_t rbd_parent_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
	ssize_t count = 0;

	if (!rbd_dev->parent)
		return sprintf(buf, "(no parent image)\n");

	for ( ; rbd_dev->parent; rbd_dev = rbd_dev->parent) {
		struct rbd_spec *spec = rbd_dev->parent_spec;

		count += sprintf(&buf[count], "%s"
			    "pool_id %llu\npool_name %s\n"
			    "image_id %s\nimage_name %s\n"
			    "snap_id %llu\nsnap_name %s\n"
			    "overlap %llu\n",
			    !count ? "" : "\n", /* first? */
			    spec->pool_id, spec->pool_name,
			    spec->image_id, spec->image_name ?: "(unknown)",
			    spec->snap_id, spec->snap_name,
			    rbd_dev->parent_overlap);
	}

	return count;
}
static ssize_t rbd_image_refresh(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t size)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
	int ret;

	ret = rbd_dev_refresh(rbd_dev);
	if (ret)
		return ret;

	return size;
}
static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
static DEVICE_ATTR(features, S_IRUGO, rbd_features_show, NULL);
static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
static DEVICE_ATTR(minor, S_IRUGO, rbd_minor_show, NULL);
static DEVICE_ATTR(client_addr, S_IRUGO, rbd_client_addr_show, NULL);
static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
static DEVICE_ATTR(cluster_fsid, S_IRUGO, rbd_cluster_fsid_show, NULL);
static DEVICE_ATTR(config_info, S_IRUSR, rbd_config_info_show, NULL);
static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL);
static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL);
static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
static DEVICE_ATTR(snap_id, S_IRUGO, rbd_snap_id_show, NULL);
static DEVICE_ATTR(parent, S_IRUGO, rbd_parent_show, NULL);
static struct attribute *rbd_attrs[] = {
	&dev_attr_size.attr,
	&dev_attr_features.attr,
	&dev_attr_major.attr,
	&dev_attr_minor.attr,
	&dev_attr_client_addr.attr,
	&dev_attr_client_id.attr,
	&dev_attr_cluster_fsid.attr,
	&dev_attr_config_info.attr,
	&dev_attr_pool.attr,
	&dev_attr_pool_id.attr,
	&dev_attr_name.attr,
	&dev_attr_image_id.attr,
	&dev_attr_current_snap.attr,
	&dev_attr_snap_id.attr,
	&dev_attr_parent.attr,
	&dev_attr_refresh.attr,
	NULL
};

static struct attribute_group rbd_attr_group = {
	.attrs = rbd_attrs,
};

static const struct attribute_group *rbd_attr_groups[] = {
	&rbd_attr_group,
	NULL
};

static void rbd_dev_release(struct device *dev);

static struct device_type rbd_device_type = {
	.name		= "rbd",
	.groups		= rbd_attr_groups,
	.release	= rbd_dev_release,
};
static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
{
	kref_get(&spec->kref);

	return spec;
}

static void rbd_spec_free(struct kref *kref);
static void rbd_spec_put(struct rbd_spec *spec)
{
	if (spec)
		kref_put(&spec->kref, rbd_spec_free);
}

static struct rbd_spec *rbd_spec_alloc(void)
{
	struct rbd_spec *spec;

	spec = kzalloc(sizeof (*spec), GFP_KERNEL);
	if (!spec)
		return NULL;

	spec->pool_id = CEPH_NOPOOL;
	spec->snap_id = CEPH_NOSNAP;
	kref_init(&spec->kref);

	return spec;
}

static void rbd_spec_free(struct kref *kref)
{
	struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);

	kfree(spec->pool_name);
	kfree(spec->image_id);
	kfree(spec->image_name);
	kfree(spec->snap_name);
	kfree(spec);
}
static void rbd_dev_free(struct rbd_device *rbd_dev)
{
	WARN_ON(rbd_dev->watch_state != RBD_WATCH_STATE_UNREGISTERED);
	WARN_ON(rbd_dev->lock_state != RBD_LOCK_STATE_UNLOCKED);

	ceph_oid_destroy(&rbd_dev->header_oid);
	ceph_oloc_destroy(&rbd_dev->header_oloc);
	kfree(rbd_dev->config_info);

	rbd_put_client(rbd_dev->rbd_client);
	rbd_spec_put(rbd_dev->spec);
	kfree(rbd_dev->opts);
	kfree(rbd_dev);
}

static void rbd_dev_release(struct device *dev)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
	bool need_put = !!rbd_dev->opts;

	if (need_put) {
		destroy_workqueue(rbd_dev->task_wq);
		ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
	}

	rbd_dev_free(rbd_dev);

	/*
	 * This is racy, but way better than putting module outside of
	 * the release callback.  The race window is pretty small, so
	 * doing something similar to dm (dm-builtin.c) is overkill.
	 */
	if (need_put)
		module_put(THIS_MODULE);
}
static struct rbd_device *__rbd_dev_create(struct rbd_client *rbdc,
					   struct rbd_spec *spec)
{
	struct rbd_device *rbd_dev;

	rbd_dev = kzalloc(sizeof(*rbd_dev), GFP_KERNEL);
	if (!rbd_dev)
		return NULL;

	spin_lock_init(&rbd_dev->lock);
	INIT_LIST_HEAD(&rbd_dev->node);
	init_rwsem(&rbd_dev->header_rwsem);

	ceph_oid_init(&rbd_dev->header_oid);
	ceph_oloc_init(&rbd_dev->header_oloc);

	mutex_init(&rbd_dev->watch_mutex);
	rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED;
	INIT_DELAYED_WORK(&rbd_dev->watch_dwork, rbd_reregister_watch);

	init_rwsem(&rbd_dev->lock_rwsem);
	rbd_dev->lock_state = RBD_LOCK_STATE_UNLOCKED;
	INIT_WORK(&rbd_dev->acquired_lock_work, rbd_notify_acquired_lock);
	INIT_WORK(&rbd_dev->released_lock_work, rbd_notify_released_lock);
	INIT_DELAYED_WORK(&rbd_dev->lock_dwork, rbd_acquire_lock);
	INIT_WORK(&rbd_dev->unlock_work, rbd_release_lock_work);
	init_waitqueue_head(&rbd_dev->lock_waitq);

	rbd_dev->dev.bus = &rbd_bus_type;
	rbd_dev->dev.type = &rbd_device_type;
	rbd_dev->dev.parent = &rbd_root_dev;
	device_initialize(&rbd_dev->dev);

	rbd_dev->rbd_client = rbdc;
	rbd_dev->spec = spec;

	rbd_dev->layout.stripe_unit = 1 << RBD_MAX_OBJ_ORDER;
	rbd_dev->layout.stripe_count = 1;
	rbd_dev->layout.object_size = 1 << RBD_MAX_OBJ_ORDER;
	rbd_dev->layout.pool_id = spec->pool_id;
	RCU_INIT_POINTER(rbd_dev->layout.pool_ns, NULL);

	return rbd_dev;
}
/*
 * Create a mapping rbd_dev.
 */
static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
					 struct rbd_spec *spec,
					 struct rbd_options *opts)
{
	struct rbd_device *rbd_dev;

	rbd_dev = __rbd_dev_create(rbdc, spec);
	if (!rbd_dev)
		return NULL;

	rbd_dev->opts = opts;

	/* get an id and fill in device name */
	rbd_dev->dev_id = ida_simple_get(&rbd_dev_id_ida, 0,
					 minor_to_rbd_dev_id(1 << MINORBITS),
					 GFP_KERNEL);
	if (rbd_dev->dev_id < 0)
		goto fail_rbd_dev;

	sprintf(rbd_dev->name, RBD_DRV_NAME "%d", rbd_dev->dev_id);
	rbd_dev->task_wq = alloc_ordered_workqueue("%s-tasks", WQ_MEM_RECLAIM,
						   rbd_dev->name);
	if (!rbd_dev->task_wq)
		goto fail_dev_id;

	/* we have a ref from do_rbd_add() */
	__module_get(THIS_MODULE);

	dout("%s rbd_dev %p dev_id %d\n", __func__, rbd_dev, rbd_dev->dev_id);
	return rbd_dev;

fail_dev_id:
	ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
fail_rbd_dev:
	rbd_dev_free(rbd_dev);
	return NULL;
}
static void rbd_dev_destroy(struct rbd_device *rbd_dev)
{
	if (rbd_dev)
		put_device(&rbd_dev->dev);
}
/*
 * Get the size and object order for an image snapshot, or if
 * snap_id is CEPH_NOSNAP, gets this information for the base
 * image.
 */
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				 u8 *order, u64 *snap_size)
{
	__le64 snapid = cpu_to_le64(snap_id);
	int ret;
	struct {
		u8 order;
		__le64 size;
	} __attribute__ ((packed)) size_buf = { 0 };

	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
				  "rbd", "get_size",
				  &snapid, sizeof (snapid),
				  &size_buf, sizeof (size_buf));
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		return ret;
	if (ret < sizeof (size_buf))
		return -ERANGE;

	if (order) {
		*order = size_buf.order;
		dout(" order %u", (unsigned int)*order);
	}
	*snap_size = le64_to_cpu(size_buf.size);

	dout(" snap_id 0x%016llx snap_size = %llu\n",
	     (unsigned long long)snap_id,
	     (unsigned long long)*snap_size);

	return 0;
}
static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
{
	return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
				     &rbd_dev->header.obj_order,
				     &rbd_dev->header.image_size);
}

static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
{
	void *reply_buf;
	void *p;
	int ret;

	reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL);
	if (!reply_buf)
		return -ENOMEM;

	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
				  "rbd", "get_object_prefix", NULL, 0,
				  reply_buf, RBD_OBJ_PREFIX_LEN_MAX);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		goto out;

	p = reply_buf;
	rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
						p + ret, NULL, GFP_NOIO);
	ret = 0;

	if (IS_ERR(rbd_dev->header.object_prefix)) {
		ret = PTR_ERR(rbd_dev->header.object_prefix);
		rbd_dev->header.object_prefix = NULL;
	} else {
		dout(" object_prefix = %s\n", rbd_dev->header.object_prefix);
	}
out:
	kfree(reply_buf);

	return ret;
}
static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
				     u64 *snap_features)
{
	__le64 snapid = cpu_to_le64(snap_id);
	struct {
		__le64 features;
		__le64 incompat;
	} __attribute__ ((packed)) features_buf = { 0 };
	u64 unsup;
	int ret;

	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
				  "rbd", "get_features",
				  &snapid, sizeof (snapid),
				  &features_buf, sizeof (features_buf));
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		return ret;
	if (ret < sizeof (features_buf))
		return -ERANGE;

	unsup = le64_to_cpu(features_buf.incompat) & ~RBD_FEATURES_SUPPORTED;
	if (unsup) {
		rbd_warn(rbd_dev, "image uses unsupported features: 0x%llx",
			 unsup);
		return -ENXIO;
	}

	*snap_features = le64_to_cpu(features_buf.features);

	dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
	     (unsigned long long)snap_id,
	     (unsigned long long)*snap_features,
	     (unsigned long long)le64_to_cpu(features_buf.incompat));

	return 0;
}

static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
{
	return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
					 &rbd_dev->header.features);
}
static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
{
	struct rbd_spec *parent_spec;
	size_t size;
	void *reply_buf = NULL;
	__le64 snapid;
	void *p;
	void *end;
	u64 pool_id;
	char *image_id;
	u64 snap_id;
	u64 overlap;
	int ret;

	parent_spec = rbd_spec_alloc();
	if (!parent_spec)
		return -ENOMEM;

	size = sizeof (__le64) +				/* pool_id */
		sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX +	/* image_id */
		sizeof (__le64) +				/* snap_id */
		sizeof (__le64);				/* overlap */
	reply_buf = kmalloc(size, GFP_KERNEL);
	if (!reply_buf) {
		ret = -ENOMEM;
		goto out_err;
	}

	snapid = cpu_to_le64(rbd_dev->spec->snap_id);
	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
				  "rbd", "get_parent",
				  &snapid, sizeof (snapid),
				  reply_buf, size);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		goto out_err;

	p = reply_buf;
	end = reply_buf + ret;
	ret = -ERANGE;
	ceph_decode_64_safe(&p, end, pool_id, out_err);
	if (pool_id == CEPH_NOPOOL) {
		/*
		 * Either the parent never existed, or we have
		 * record of it but the image got flattened so it no
		 * longer has a parent.  When the parent of a
		 * layered image disappears we immediately set the
		 * overlap to 0.  The effect of this is that all new
		 * requests will be treated as if the image had no
		 * parent.
		 */
		if (rbd_dev->parent_overlap) {
			rbd_dev->parent_overlap = 0;
			rbd_dev_parent_put(rbd_dev);
			pr_info("%s: clone image has been flattened\n",
				rbd_dev->disk->disk_name);
		}

		goto out;	/* No parent?  No problem. */
	}

	/* The ceph file layout needs to fit pool id in 32 bits */

	ret = -EIO;
	if (pool_id > (u64)U32_MAX) {
		rbd_warn(NULL, "parent pool id too large (%llu > %u)",
			 (unsigned long long)pool_id, U32_MAX);
		goto out_err;
	}

	image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
	if (IS_ERR(image_id)) {
		ret = PTR_ERR(image_id);
		goto out_err;
	}
	ceph_decode_64_safe(&p, end, snap_id, out_err);
	ceph_decode_64_safe(&p, end, overlap, out_err);

	/*
	 * The parent won't change (except when the clone is
	 * flattened, already handled that).  So we only need to
	 * record the parent spec if we have not already done so.
	 */
	if (!rbd_dev->parent_spec) {
		parent_spec->pool_id = pool_id;
		parent_spec->image_id = image_id;
		parent_spec->snap_id = snap_id;
		rbd_dev->parent_spec = parent_spec;
		parent_spec = NULL;	/* rbd_dev now owns this */
	} else {
		kfree(image_id);
	}

	/*
	 * We always update the parent overlap.  If it's zero we issue
	 * a warning, as we will proceed as if there was no parent.
	 */
	if (!overlap) {
		if (parent_spec) {
			/* refresh, careful to warn just once */
			if (rbd_dev->parent_overlap)
				rbd_warn(rbd_dev,
				    "clone now standalone (overlap became 0)");
		} else {
			/* initial probe */
			rbd_warn(rbd_dev, "clone is standalone (overlap 0)");
		}
	}
	rbd_dev->parent_overlap = overlap;

out:
	ret = 0;
out_err:
	kfree(reply_buf);
	rbd_spec_put(parent_spec);

	return ret;
}
static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
{
	struct {
		__le64 stripe_unit;
		__le64 stripe_count;
	} __attribute__ ((packed)) striping_info_buf = { 0 };
	size_t size = sizeof (striping_info_buf);
	void *p;
	u64 obj_size;
	u64 stripe_unit;
	u64 stripe_count;
	int ret;

	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
				  "rbd", "get_stripe_unit_count", NULL, 0,
				  (char *)&striping_info_buf, size);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		return ret;
	if (ret < size)
		return -ERANGE;

	/*
	 * We don't actually support the "fancy striping" feature
	 * (STRIPINGV2) yet, but if the striping sizes are the
	 * defaults the behavior is the same as before.  So find
	 * out, and only fail if the image has non-default values.
	 */
	obj_size = (u64)1 << rbd_dev->header.obj_order;
	p = &striping_info_buf;
	stripe_unit = ceph_decode_64(&p);
	if (stripe_unit != obj_size) {
		rbd_warn(rbd_dev, "unsupported stripe unit "
			 "(got %llu want %llu)",
			 stripe_unit, obj_size);
		return -EINVAL;
	}
	stripe_count = ceph_decode_64(&p);
	if (stripe_count != 1) {
		rbd_warn(rbd_dev, "unsupported stripe count "
			 "(got %llu want 1)", stripe_count);
		return -EINVAL;
	}
	rbd_dev->header.stripe_unit = stripe_unit;
	rbd_dev->header.stripe_count = stripe_count;

	return 0;
}
static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
{
	size_t image_id_size;
	char *image_id;
	void *p;
	void *end;
	size_t size;
	void *reply_buf = NULL;
	size_t len = 0;
	char *image_name = NULL;
	int ret;

	rbd_assert(!rbd_dev->spec->image_name);

	len = strlen(rbd_dev->spec->image_id);
	image_id_size = sizeof (__le32) + len;
	image_id = kmalloc(image_id_size, GFP_KERNEL);
	if (!image_id)
		return NULL;

	p = image_id;
	end = image_id + image_id_size;
	ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);

	size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
	reply_buf = kmalloc(size, GFP_KERNEL);
	if (!reply_buf)
		goto out;

	ret = rbd_obj_method_sync(rbd_dev, RBD_DIRECTORY,
				  "rbd", "dir_get_name",
				  image_id, image_id_size,
				  reply_buf, size);
	if (ret < 0)
		goto out;
	p = reply_buf;
	end = reply_buf + ret;

	image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
	if (IS_ERR(image_name))
		image_name = NULL;
	else
		dout("%s: name is %s len is %zd\n", __func__, image_name, len);
out:
	kfree(reply_buf);
	kfree(image_id);

	return image_name;
}
static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	const char *snap_name;
	u32 which = 0;

	/* Skip over names until we find the one we are looking for */

	snap_name = rbd_dev->header.snap_names;
	while (which < snapc->num_snaps) {
		if (!strcmp(name, snap_name))
			return snapc->snaps[which];
		snap_name += strlen(snap_name) + 1;
		which++;
	}
	return CEPH_NOSNAP;
}

static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	u32 which;
	bool found = false;
	u64 snap_id;

	for (which = 0; !found && which < snapc->num_snaps; which++) {
		const char *snap_name;

		snap_id = snapc->snaps[which];
		snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
		if (IS_ERR(snap_name)) {
			/* ignore no-longer existing snapshots */
			if (PTR_ERR(snap_name) == -ENOENT)
				continue;
			else
				break;
		}
		found = !strcmp(name, snap_name);
		kfree(snap_name);
	}
	return found ? snap_id : CEPH_NOSNAP;
}

/*
 * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if
 * no snapshot by that name is found, or if an error occurs.
 */
static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
{
	if (rbd_dev->image_format == 1)
		return rbd_v1_snap_id_by_name(rbd_dev, name);

	return rbd_v2_snap_id_by_name(rbd_dev, name);
}
/*
 * An image being mapped will have everything but the snap id.
 */
static int rbd_spec_fill_snap_id(struct rbd_device *rbd_dev)
{
	struct rbd_spec *spec = rbd_dev->spec;

	rbd_assert(spec->pool_id != CEPH_NOPOOL && spec->pool_name);
	rbd_assert(spec->image_id && spec->image_name);
	rbd_assert(spec->snap_name);

	if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
		u64 snap_id;

		snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
		if (snap_id == CEPH_NOSNAP)
			return -ENOENT;

		spec->snap_id = snap_id;
	} else {
		spec->snap_id = CEPH_NOSNAP;
	}

	return 0;
}
/*
 * A parent image will have all ids but none of the names.
 *
 * All names in an rbd spec are dynamically allocated.  It's OK if we
 * can't figure out the name for an image id.
 */
static int rbd_spec_fill_names(struct rbd_device *rbd_dev)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct rbd_spec *spec = rbd_dev->spec;
	const char *pool_name;
	const char *image_name;
	const char *snap_name;
	int ret;

	rbd_assert(spec->pool_id != CEPH_NOPOOL);
	rbd_assert(spec->image_id);
	rbd_assert(spec->snap_id != CEPH_NOSNAP);

	/* Get the pool name; we have to make our own copy of this */

	pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
	if (!pool_name) {
		rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
		return -EIO;
	}
	pool_name = kstrdup(pool_name, GFP_KERNEL);
	if (!pool_name)
		return -ENOMEM;

	/* Fetch the image name; tolerate failure here */

	image_name = rbd_dev_image_name(rbd_dev);
	if (!image_name)
		rbd_warn(rbd_dev, "unable to get image name");

	/* Fetch the snapshot name */

	snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
	if (IS_ERR(snap_name)) {
		ret = PTR_ERR(snap_name);
		goto out_err;
	}

	spec->pool_name = pool_name;
	spec->image_name = image_name;
	spec->snap_name = snap_name;

	return 0;

out_err:
	kfree(image_name);
	kfree(pool_name);
	return ret;
}
static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
{
	size_t size;
	int ret;
	void *reply_buf;
	void *p;
	void *end;
	u64 seq;
	u32 snap_count;
	struct ceph_snap_context *snapc;
	u32 i;

	/*
	 * We'll need room for the seq value (maximum snapshot id),
	 * snapshot count, and array of that many snapshot ids.
	 * For now we have a fixed upper limit on the number we're
	 * prepared to receive.
	 */
	size = sizeof (__le64) + sizeof (__le32) +
			RBD_MAX_SNAP_COUNT * sizeof (__le64);
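	/*
	 * Worked example: with the fixed limit of 510 snapshot ids,
	 * that is 8 + 4 + 510 * 8 = 4092 bytes, so even a maximal
	 * reply fits in a single 4 KiB page.
	 */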
	reply_buf = kzalloc(size, GFP_KERNEL);
	if (!reply_buf)
		return -ENOMEM;

	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
				  "rbd", "get_snapcontext", NULL, 0,
				  reply_buf, size);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		goto out;

	p = reply_buf;
	end = reply_buf + ret;
	ret = -ERANGE;
	ceph_decode_64_safe(&p, end, seq, out);
	ceph_decode_32_safe(&p, end, snap_count, out);

	/*
	 * Make sure the reported number of snapshot ids wouldn't go
	 * beyond the end of our buffer.  But before checking that,
	 * make sure the computed size of the snapshot context we
	 * allocate is representable in a size_t.
	 */
	if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
				 / sizeof (u64))
		goto out;
	if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
		goto out;
	ret = 0;

	snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
	if (!snapc) {
		ret = -ENOMEM;
		goto out;
	}
	snapc->seq = seq;
	for (i = 0; i < snap_count; i++)
		snapc->snaps[i] = ceph_decode_64(&p);

	ceph_put_snap_context(rbd_dev->header.snapc);
	rbd_dev->header.snapc = snapc;

	dout("  snap context seq = %llu, snap_count = %u\n",
		(unsigned long long)seq, (unsigned int)snap_count);
out:
	kfree(reply_buf);

	return ret;
}
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id)
{
	size_t size;
	void *reply_buf;
	__le64 snapid;
	int ret;
	void *p;
	void *end;
	char *snap_name;

	size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
	reply_buf = kmalloc(size, GFP_KERNEL);
	if (!reply_buf)
		return ERR_PTR(-ENOMEM);

	snapid = cpu_to_le64(snap_id);
	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
				  "rbd", "get_snapshot_name",
				  &snapid, sizeof (snapid),
				  reply_buf, size);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0) {
		snap_name = ERR_PTR(ret);
		goto out;
	}

	p = reply_buf;
	end = reply_buf + ret;
	snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
	if (IS_ERR(snap_name))
		goto out;

	dout(" snap_id 0x%016llx snap_name = %s\n",
		(unsigned long long)snap_id, snap_name);
out:
	kfree(reply_buf);

	return snap_name;
}
static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
{
	bool first_time = rbd_dev->header.object_prefix == NULL;
	int ret;

	ret = rbd_dev_v2_image_size(rbd_dev);
	if (ret)
		return ret;

	if (first_time) {
		ret = rbd_dev_v2_header_onetime(rbd_dev);
		if (ret)
			return ret;
	}

	ret = rbd_dev_v2_snap_context(rbd_dev);
	if (ret && first_time) {
		kfree(rbd_dev->header.object_prefix);
		rbd_dev->header.object_prefix = NULL;
	}

	return ret;
}

static int rbd_dev_header_info(struct rbd_device *rbd_dev)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));

	if (rbd_dev->image_format == 1)
		return rbd_dev_v1_header_info(rbd_dev);

	return rbd_dev_v2_header_info(rbd_dev);
}
/*
 * Skips over white space at *buf, and updates *buf to point to the
 * first found non-space character (if any).  Returns the length of
 * the token (string of non-white space characters) found.  Note
 * that *buf must be terminated with '\0'.
 */
static inline size_t next_token(const char **buf)
{
	/*
	 * These are the characters that produce nonzero for
	 * isspace() in the "C" and "POSIX" locales.
	 */
	const char *spaces = " \f\n\r\t\v";

	*buf += strspn(*buf, spaces);	/* Find start of token */

	return strcspn(*buf, spaces);	/* Return token length */
}
/*
 * Finds the next token in *buf, dynamically allocates a buffer big
 * enough to hold a copy of it, and copies the token into the new
 * buffer.  The copy is guaranteed to be terminated with '\0'.  Note
 * that a duplicate buffer is created even for a zero-length token.
 *
 * Returns a pointer to the newly-allocated duplicate, or a null
 * pointer if memory for the duplicate was not available.  If
 * the lenp argument is a non-null pointer, the length of the token
 * (not including the '\0') is returned in *lenp.
 *
 * If successful, the *buf pointer will be updated to point beyond
 * the end of the found token.
 *
 * Note: uses GFP_KERNEL for allocation.
 */
static inline char *dup_token(const char **buf, size_t *lenp)
{
	char *dup;
	size_t len;

	len = next_token(buf);
	dup = kmemdup(*buf, len + 1, GFP_KERNEL);
	if (!dup)
		return NULL;
	*(dup + len) = '\0';
	*buf += len;

	if (lenp)
		*lenp = len;

	return dup;
}
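/*
 * A minimal usage sketch (hypothetical input, for illustration only):
 * given buf pointing at "  rbd foo", next_token() advances buf past
 * the leading spaces and returns 3, and dup_token() then hands back a
 * freshly allocated "rbd" while leaving buf at " foo".
 */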
/*
 * Parse the options provided for an "rbd add" (i.e., rbd image
 * mapping) request.  These arrive via a write to /sys/bus/rbd/add,
 * and the data written is passed here via a NUL-terminated buffer.
 * Returns 0 if successful or an error code otherwise.
 *
 * The information extracted from these options is recorded in
 * the other parameters which return dynamically-allocated
 * structures:
 *  ceph_opts
 *      The address of a pointer that will refer to a ceph options
 *      structure.  Caller must release the returned pointer using
 *      ceph_destroy_options() when it is no longer needed.
 *  rbd_opts
 *	Address of an rbd options pointer.  Fully initialized by
 *	this function; caller must release with kfree().
 *  spec
 *	Address of an rbd image specification pointer.  Fully
 *	initialized by this function based on parsed options.
 *	Caller must release with rbd_spec_put().
 *
 * The options passed take this form:
 *  <mon_addrs> <options> <pool_name> <image_name> [<snap_id>]
 * where:
 *  <mon_addrs>
 *      A comma-separated list of one or more monitor addresses.
 *      A monitor address is an ip address, optionally followed
 *      by a port number (separated by a colon).
 *        I.e.:  ip1[:port1][,ip2[:port2]...]
 *  <options>
 *      A comma-separated list of ceph and/or rbd options.
 *  <pool_name>
 *      The name of the rados pool containing the rbd image.
 *  <image_name>
 *      The name of the image in that pool to map.
 *  <snap_id>
 *      An optional snapshot id.  If provided, the mapping will
 *      present data from the image at the time that snapshot was
 *      created.  The image head is used if no snapshot id is
 *      provided.  Snapshot mappings are always read-only.
 */
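/*
 * For example, a write like the following (all values hypothetical)
 * would map pool "rbd", image "foo" at its head:
 *
 *   $ echo "1.2.3.4:6789 name=admin,secret=AQB... rbd foo -" > \
 *         /sys/bus/rbd/add
 *
 * See Documentation/ABI/testing/sysfs-bus-rbd for the authoritative
 * description of this interface.
 */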
static int rbd_add_parse_args(const char *buf,
			      struct ceph_options **ceph_opts,
			      struct rbd_options **opts,
			      struct rbd_spec **rbd_spec)
{
	size_t len;
	char *options;
	const char *mon_addrs;
	char *snap_name;
	size_t mon_addrs_size;
	struct rbd_spec *spec = NULL;
	struct rbd_options *rbd_opts = NULL;
	struct ceph_options *copts;
	int ret;

	/* The first four tokens are required */

	len = next_token(&buf);
	if (!len) {
		rbd_warn(NULL, "no monitor address(es) provided");
		return -EINVAL;
	}
	mon_addrs = buf;
	mon_addrs_size = len + 1;
	buf += len;

	ret = -EINVAL;
	options = dup_token(&buf, NULL);
	if (!options)
		return -ENOMEM;
	if (!*options) {
		rbd_warn(NULL, "no options provided");
		goto out_err;
	}

	spec = rbd_spec_alloc();
	if (!spec)
		goto out_mem;

	spec->pool_name = dup_token(&buf, NULL);
	if (!spec->pool_name)
		goto out_mem;
	if (!*spec->pool_name) {
		rbd_warn(NULL, "no pool name provided");
		goto out_err;
	}

	spec->image_name = dup_token(&buf, NULL);
	if (!spec->image_name)
		goto out_mem;
	if (!*spec->image_name) {
		rbd_warn(NULL, "no image name provided");
		goto out_err;
	}

	/*
	 * Snapshot name is optional; default is to use "-"
	 * (indicating the head/no snapshot).
	 */
	len = next_token(&buf);
	if (!len) {
		buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
		len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
	} else if (len > RBD_MAX_SNAP_NAME_LEN) {
		ret = -ENAMETOOLONG;
		goto out_err;
	}
	snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
	if (!snap_name)
		goto out_mem;
	*(snap_name + len) = '\0';
	spec->snap_name = snap_name;
	/* Initialize all rbd options to the defaults */

	rbd_opts = kzalloc(sizeof (*rbd_opts), GFP_KERNEL);
	if (!rbd_opts)
		goto out_mem;

	rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;
	rbd_opts->queue_depth = RBD_QUEUE_DEPTH_DEFAULT;
	rbd_opts->lock_on_read = RBD_LOCK_ON_READ_DEFAULT;

	copts = ceph_parse_options(options, mon_addrs,
					mon_addrs + mon_addrs_size - 1,
					parse_rbd_opts_token, rbd_opts);
	if (IS_ERR(copts)) {
		ret = PTR_ERR(copts);
		goto out_err;
	}
	kfree(options);

	*ceph_opts = copts;
	*opts = rbd_opts;
	*rbd_spec = spec;

	return 0;

out_mem:
	ret = -ENOMEM;
out_err:
	kfree(rbd_opts);
	rbd_spec_put(spec);
	kfree(options);

	return ret;
}
/*
 * Return pool id (>= 0) or a negative error code.
 */
static int rbd_add_get_pool_id(struct rbd_client *rbdc, const char *pool_name)
{
	struct ceph_options *opts = rbdc->client->options;
	u64 newest_epoch;
	int tries = 0;
	int ret;

again:
	ret = ceph_pg_poolid_by_name(rbdc->client->osdc.osdmap, pool_name);
	if (ret == -ENOENT && tries++ < 1) {
		ret = ceph_monc_get_version(&rbdc->client->monc, "osdmap",
					    &newest_epoch);
		if (ret < 0)
			return ret;

		if (rbdc->client->osdc.osdmap->epoch < newest_epoch) {
			ceph_osdc_maybe_request_map(&rbdc->client->osdc);
			(void) ceph_monc_wait_osdmap(&rbdc->client->monc,
						     newest_epoch,
						     opts->mount_timeout);
			goto again;
		} else {
			/* the osdmap we have is new enough */
			return -ENOENT;
		}
	}

	return ret;
}
/*
 * An rbd format 2 image has a unique identifier, distinct from the
 * name given to it by the user.  Internally, that identifier is
 * what's used to specify the names of objects related to the image.
 *
 * A special "rbd id" object is used to map an rbd image name to its
 * id.  If that object doesn't exist, then there is no v2 rbd image
 * with the supplied name.
 *
 * This function will record the given rbd_dev's image_id field if
 * it can be determined, and in that case will return 0.  If any
 * errors occur a negative errno will be returned and the rbd_dev's
 * image_id field will be unchanged (and should be NULL).
 */
static int rbd_dev_image_id(struct rbd_device *rbd_dev)
{
	int ret;
	size_t size;
	char *object_name;
	void *response;
	char *image_id;

	/*
	 * When probing a parent image, the image id is already
	 * known (and the image name likely is not).  There's no
	 * need to fetch the image id again in this case.  We
	 * do still need to set the image format though.
	 */
	if (rbd_dev->spec->image_id) {
		rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;

		return 0;
	}

	/*
	 * First, see if the format 2 image id file exists, and if
	 * so, get the image's persistent id from it.
	 */
	size = sizeof (RBD_ID_PREFIX) + strlen(rbd_dev->spec->image_name);
	object_name = kmalloc(size, GFP_NOIO);
	if (!object_name)
		return -ENOMEM;
	sprintf(object_name, "%s%s", RBD_ID_PREFIX, rbd_dev->spec->image_name);
	dout("rbd id object name is %s\n", object_name);
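	/*
	 * For an image named "foo" (illustrative name only) this
	 * yields the id object "rbd_id.foo", since RBD_ID_PREFIX is
	 * "rbd_id." in rbd_types.h.
	 */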
	/* Response will be an encoded string, which includes a length */

	size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
	response = kzalloc(size, GFP_NOIO);
	if (!response) {
		ret = -ENOMEM;
		goto out;
	}

	/* If it doesn't exist we'll assume it's a format 1 image */

	ret = rbd_obj_method_sync(rbd_dev, object_name,
				  "rbd", "get_id", NULL, 0,
				  response, RBD_IMAGE_ID_LEN_MAX);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret == -ENOENT) {
		image_id = kstrdup("", GFP_KERNEL);
		ret = image_id ? 0 : -ENOMEM;
		if (!ret)
			rbd_dev->image_format = 1;
	} else if (ret >= 0) {
		void *p = response;

		image_id = ceph_extract_encoded_string(&p, p + ret,
						       NULL, GFP_NOIO);
		ret = PTR_ERR_OR_ZERO(image_id);
		if (!ret)
			rbd_dev->image_format = 2;
	}

	if (!ret) {
		rbd_dev->spec->image_id = image_id;
		dout("image_id is %s\n", image_id);
	}
out:
	kfree(response);
	kfree(object_name);

	return ret;
}
/*
 * Undo whatever state changes are made by v1 or v2 header info
 * routines.
 */
static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
{
	struct rbd_image_header *header;

	rbd_dev_parent_put(rbd_dev);

	/* Free dynamic fields from the header, then zero it out */

	header = &rbd_dev->header;
	ceph_put_snap_context(header->snapc);
	kfree(header->snap_sizes);
	kfree(header->snap_names);
	kfree(header->object_prefix);
	memset(header, 0, sizeof (*header));
}
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
{
	int ret;

	ret = rbd_dev_v2_object_prefix(rbd_dev);
	if (ret)
		return ret;

	/*
	 * Get and check the features for the image.  Currently the
	 * features are assumed to never change.
	 */
	ret = rbd_dev_v2_features(rbd_dev);
	if (ret)
		goto out_err;

	/* If the image supports fancy striping, get its parameters */

	if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
		ret = rbd_dev_v2_striping_info(rbd_dev);
		if (ret < 0)
			goto out_err;
	}
	/* No support for crypto and compression type format 2 images */

	return 0;

out_err:
	rbd_dev->header.features = 0;
	kfree(rbd_dev->header.object_prefix);
	rbd_dev->header.object_prefix = NULL;

	return ret;
}
/*
 * @depth is rbd_dev_image_probe() -> rbd_dev_probe_parent() ->
 * rbd_dev_image_probe() recursion depth, which means it's also the
 * length of the already discovered part of the parent chain.
 */
static int rbd_dev_probe_parent(struct rbd_device *rbd_dev, int depth)
{
	struct rbd_device *parent = NULL;
	int ret;

	if (!rbd_dev->parent_spec)
		return 0;

	if (++depth > RBD_MAX_PARENT_CHAIN_LEN) {
		pr_info("parent chain is too long (%d)\n", depth);
		ret = -EINVAL;
		goto out_err;
	}

	parent = __rbd_dev_create(rbd_dev->rbd_client, rbd_dev->parent_spec);
	if (!parent) {
		ret = -ENOMEM;
		goto out_err;
	}

	/*
	 * Images related by parent/child relationships always share
	 * rbd_client and spec/parent_spec, so bump their refcounts.
	 */
	__rbd_get_client(rbd_dev->rbd_client);
	rbd_spec_get(rbd_dev->parent_spec);

	ret = rbd_dev_image_probe(parent, depth);
	if (ret < 0)
		goto out_err;

	rbd_dev->parent = parent;
	atomic_set(&rbd_dev->parent_ref, 1);
	return 0;

out_err:
	rbd_dev_unparent(rbd_dev);
	rbd_dev_destroy(parent);
	return ret;
}
/*
 * rbd_dev->header_rwsem must be locked for write and will be unlocked
 * upon return.
 */
static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
{
	int ret;

	/* Record our major and minor device numbers. */

	if (!single_major) {
		ret = register_blkdev(0, rbd_dev->name);
		if (ret < 0)
			goto err_out_unlock;

		rbd_dev->major = ret;
		rbd_dev->minor = 0;
	} else {
		rbd_dev->major = rbd_major;
		rbd_dev->minor = rbd_dev_id_to_minor(rbd_dev->dev_id);
	}

	/* Set up the blkdev mapping. */

	ret = rbd_init_disk(rbd_dev);
	if (ret)
		goto err_out_blkdev;

	ret = rbd_dev_mapping_set(rbd_dev);
	if (ret)
		goto err_out_disk;

	set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
	set_disk_ro(rbd_dev->disk, rbd_dev->mapping.read_only);

	dev_set_name(&rbd_dev->dev, "%d", rbd_dev->dev_id);
	ret = device_add(&rbd_dev->dev);
	if (ret)
		goto err_out_mapping;

	/* Everything's ready.  Announce the disk to the world. */

	set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
	up_write(&rbd_dev->header_rwsem);

	spin_lock(&rbd_dev_list_lock);
	list_add_tail(&rbd_dev->node, &rbd_dev_list);
	spin_unlock(&rbd_dev_list_lock);

	add_disk(rbd_dev->disk);
	pr_info("%s: capacity %llu features 0x%llx\n", rbd_dev->disk->disk_name,
		(unsigned long long)get_capacity(rbd_dev->disk) << SECTOR_SHIFT,
		rbd_dev->header.features);

	return 0;

err_out_mapping:
	rbd_dev_mapping_clear(rbd_dev);
err_out_disk:
	rbd_free_disk(rbd_dev);
err_out_blkdev:
	if (!single_major)
		unregister_blkdev(rbd_dev->major, rbd_dev->name);
err_out_unlock:
	up_write(&rbd_dev->header_rwsem);
	return ret;
}
static int rbd_dev_header_name(struct rbd_device *rbd_dev)
{
	struct rbd_spec *spec = rbd_dev->spec;
	int ret;

	/* Record the header object name for this rbd image. */

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));

	rbd_dev->header_oloc.pool = rbd_dev->layout.pool_id;
	if (rbd_dev->image_format == 1)
		ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
				       spec->image_name, RBD_SUFFIX);
	else
		ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
				       RBD_HEADER_PREFIX, spec->image_id);

	return ret;
}
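/*
 * With the prefixes from rbd_types.h, the header object name above
 * resolves to "<image_name>.rbd" for format 1 images and
 * "rbd_header.<image_id>" for format 2 images (RBD_SUFFIX is ".rbd",
 * RBD_HEADER_PREFIX is "rbd_header."; the bracketed names are
 * placeholders, not literal values).
 */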
static void rbd_dev_image_release(struct rbd_device *rbd_dev)
{
	rbd_dev_unprobe(rbd_dev);
	rbd_dev->image_format = 0;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;

	rbd_dev_destroy(rbd_dev);
}

/*
 * Probe for the existence of the header object for the given rbd
 * device.  If this image is the one being mapped (i.e., not a
 * parent), initiate a watch on its header object before using that
 * object to get detailed information about the rbd image.
 */
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
{
	int ret;

	/*
	 * Get the id from the image id object.  Unless there's an
	 * error, rbd_dev->spec->image_id will be filled in with
	 * a dynamically-allocated string, and rbd_dev->image_format
	 * will be set to either 1 or 2.
	 */
	ret = rbd_dev_image_id(rbd_dev);
	if (ret)
		return ret;

	ret = rbd_dev_header_name(rbd_dev);
	if (ret)
		goto err_out_format;

	if (!depth) {
		ret = rbd_register_watch(rbd_dev);
		if (ret) {
			if (ret == -ENOENT)
				pr_info("image %s/%s does not exist\n",
					rbd_dev->spec->pool_name,
					rbd_dev->spec->image_name);
			goto err_out_format;
		}
	}

	ret = rbd_dev_header_info(rbd_dev);
	if (ret)
		goto err_out_watch;

	/*
	 * If this image is the one being mapped, we have pool name and
	 * id, image name and id, and snap name - need to fill snap id.
	 * Otherwise this is a parent image, identified by pool, image
	 * and snap ids - need to fill in names for those ids.
	 */
	if (!depth)
		ret = rbd_spec_fill_snap_id(rbd_dev);
	else
		ret = rbd_spec_fill_names(rbd_dev);
	if (ret) {
		if (ret == -ENOENT)
			pr_info("snap %s/%s@%s does not exist\n",
				rbd_dev->spec->pool_name,
				rbd_dev->spec->image_name,
				rbd_dev->spec->snap_name);
		goto err_out_probe;
	}

	if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
		ret = rbd_dev_v2_parent_info(rbd_dev);
		if (ret)
			goto err_out_probe;

		/*
		 * Need to warn users if this image is the one being
		 * mapped and has a parent.
		 */
		if (!depth && rbd_dev->parent_spec)
			rbd_warn(rbd_dev,
				 "WARNING: kernel layering is EXPERIMENTAL!");
	}

	ret = rbd_dev_probe_parent(rbd_dev, depth);
	if (ret)
		goto err_out_probe;

	dout("discovered format %u image, header name is %s\n",
	     rbd_dev->image_format, rbd_dev->header_oid.name);
	return 0;

err_out_probe:
	rbd_dev_unprobe(rbd_dev);
err_out_watch:
	if (!depth)
		rbd_unregister_watch(rbd_dev);
err_out_format:
	rbd_dev->image_format = 0;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;
	return ret;
}
static ssize_t do_rbd_add(struct bus_type *bus,
			  const char *buf,
			  size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	struct ceph_options *ceph_opts = NULL;
	struct rbd_options *rbd_opts = NULL;
	struct rbd_spec *spec = NULL;
	struct rbd_client *rbdc;
	bool read_only;
	int rc;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	/* parse add command */
	rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
	if (rc < 0)
		goto out;

	rbdc = rbd_get_client(ceph_opts);
	if (IS_ERR(rbdc)) {
		rc = PTR_ERR(rbdc);
		goto err_out_args;
	}

	/* pick the pool */
	rc = rbd_add_get_pool_id(rbdc, spec->pool_name);
	if (rc < 0) {
		if (rc == -ENOENT)
			pr_info("pool %s does not exist\n", spec->pool_name);
		goto err_out_client;
	}
	spec->pool_id = (u64)rc;

	rbd_dev = rbd_dev_create(rbdc, spec, rbd_opts);
	if (!rbd_dev) {
		rc = -ENOMEM;
		goto err_out_client;
	}
	rbdc = NULL;		/* rbd_dev now owns this */
	spec = NULL;		/* rbd_dev now owns this */
	rbd_opts = NULL;	/* rbd_dev now owns this */

	rbd_dev->config_info = kstrdup(buf, GFP_KERNEL);
	if (!rbd_dev->config_info) {
		rc = -ENOMEM;
		goto err_out_rbd_dev;
	}

	down_write(&rbd_dev->header_rwsem);
	rc = rbd_dev_image_probe(rbd_dev, 0);
	if (rc < 0) {
		up_write(&rbd_dev->header_rwsem);
		goto err_out_rbd_dev;
	}

	/* If we are mapping a snapshot it must be marked read-only */

	read_only = rbd_dev->opts->read_only;
	if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
		read_only = true;
	rbd_dev->mapping.read_only = read_only;

	rc = rbd_dev_device_setup(rbd_dev);
	if (rc) {
		/*
		 * rbd_unregister_watch() can't be moved into
		 * rbd_dev_image_release() without refactoring, see
		 * commit 1f3ef78861ac.
		 */
		rbd_unregister_watch(rbd_dev);
		rbd_dev_image_release(rbd_dev);
		goto out;
	}

	rc = count;
out:
	module_put(THIS_MODULE);
	return rc;

err_out_rbd_dev:
	rbd_dev_destroy(rbd_dev);
err_out_client:
	rbd_put_client(rbdc);
err_out_args:
	rbd_spec_put(spec);
	kfree(rbd_opts);
	goto out;
}
static ssize_t rbd_add(struct bus_type *bus,
		       const char *buf,
		       size_t count)
{
	if (single_major)
		return -EINVAL;

	return do_rbd_add(bus, buf, count);
}

static ssize_t rbd_add_single_major(struct bus_type *bus,
				    const char *buf,
				    size_t count)
{
	return do_rbd_add(bus, buf, count);
}

static void rbd_dev_device_release(struct rbd_device *rbd_dev)
{
	rbd_free_disk(rbd_dev);

	spin_lock(&rbd_dev_list_lock);
	list_del_init(&rbd_dev->node);
	spin_unlock(&rbd_dev_list_lock);

	clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
	device_del(&rbd_dev->dev);
	rbd_dev_mapping_clear(rbd_dev);
	if (!single_major)
		unregister_blkdev(rbd_dev->major, rbd_dev->name);
}
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
{
	while (rbd_dev->parent) {
		struct rbd_device *first = rbd_dev;
		struct rbd_device *second = first->parent;
		struct rbd_device *third;

		/*
		 * Follow to the parent with no grandparent and
		 * remove it.
		 */
		while (second && (third = second->parent)) {
			first = second;
			second = third;
		}
		rbd_assert(second);
		rbd_dev_image_release(second);
		first->parent = NULL;
		first->parent_overlap = 0;

		rbd_assert(first->parent_spec);
		rbd_spec_put(first->parent_spec);
		first->parent_spec = NULL;
	}
}
static ssize_t do_rbd_remove(struct bus_type *bus,
			     const char *buf,
			     size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	struct list_head *tmp;
	int dev_id;
	char opt_buf[6];
	bool already = false;
	bool force = false;
	int ret;

	dev_id = -1;
	opt_buf[0] = '\0';
	sscanf(buf, "%d %5s", &dev_id, opt_buf);
	if (dev_id < 0) {
		pr_err("dev_id out of range\n");
		return -EINVAL;
	}
	if (opt_buf[0] != '\0') {
		if (!strcmp(opt_buf, "force")) {
			force = true;
		} else {
			pr_err("bad remove option at '%s'\n", opt_buf);
			return -EINVAL;
		}
	}

	ret = -ENOENT;
	spin_lock(&rbd_dev_list_lock);
	list_for_each(tmp, &rbd_dev_list) {
		rbd_dev = list_entry(tmp, struct rbd_device, node);
		if (rbd_dev->dev_id == dev_id) {
			ret = 0;
			break;
		}
	}
	if (!ret) {
		spin_lock_irq(&rbd_dev->lock);
		if (rbd_dev->open_count && !force)
			ret = -EBUSY;
		else
			already = test_and_set_bit(RBD_DEV_FLAG_REMOVING,
							&rbd_dev->flags);
		spin_unlock_irq(&rbd_dev->lock);
	}
	spin_unlock(&rbd_dev_list_lock);
	if (ret < 0 || already)
		return ret;

	if (force) {
		/*
		 * Prevent new IO from being queued and wait for existing
		 * IO to complete/fail.
		 */
		blk_mq_freeze_queue(rbd_dev->disk->queue);
		blk_set_queue_dying(rbd_dev->disk->queue);
	}

	down_write(&rbd_dev->lock_rwsem);
	if (__rbd_is_lock_owner(rbd_dev))
		rbd_unlock(rbd_dev);
	up_write(&rbd_dev->lock_rwsem);
	rbd_unregister_watch(rbd_dev);

	/*
	 * Don't free anything from rbd_dev->disk until after all
	 * notifies are completely processed.  Otherwise
	 * rbd_bus_del_dev() will race with rbd_watch_cb(), resulting
	 * in a potential use after free of rbd_dev->disk or rbd_dev.
	 */
	rbd_dev_device_release(rbd_dev);
	rbd_dev_image_release(rbd_dev);

	return count;
}
static ssize_t rbd_remove(struct bus_type *bus,
			  const char *buf,
			  size_t count)
{
	if (single_major)
		return -EINVAL;

	return do_rbd_remove(bus, buf, count);
}

static ssize_t rbd_remove_single_major(struct bus_type *bus,
				       const char *buf,
				       size_t count)
{
	return do_rbd_remove(bus, buf, count);
}
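/*
 * As with "add", removal is driven through sysfs.  For example
 * (device id hypothetical):
 *
 *   $ echo 0 > /sys/bus/rbd/remove          # fails with EBUSY if open
 *   $ echo "0 force" > /sys/bus/rbd/remove  # overrides open_count
 */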
/*
 * create control files in sysfs
 * /sys/bus/rbd/...
 */
static int rbd_sysfs_init(void)
{
	int ret;

	ret = device_register(&rbd_root_dev);
	if (ret < 0)
		return ret;

	ret = bus_register(&rbd_bus_type);
	if (ret < 0)
		device_unregister(&rbd_root_dev);

	return ret;
}

static void rbd_sysfs_cleanup(void)
{
	bus_unregister(&rbd_bus_type);
	device_unregister(&rbd_root_dev);
}
static int rbd_slab_init(void)
{
	rbd_assert(!rbd_img_request_cache);
	rbd_img_request_cache = KMEM_CACHE(rbd_img_request, 0);
	if (!rbd_img_request_cache)
		return -ENOMEM;

	rbd_assert(!rbd_obj_request_cache);
	rbd_obj_request_cache = KMEM_CACHE(rbd_obj_request, 0);
	if (!rbd_obj_request_cache)
		goto out_err;

	rbd_assert(!rbd_segment_name_cache);
	rbd_segment_name_cache = kmem_cache_create("rbd_segment_name",
					CEPH_MAX_OID_NAME_LEN + 1, 1, 0, NULL);
	if (rbd_segment_name_cache)
		return 0;

	kmem_cache_destroy(rbd_obj_request_cache);
	rbd_obj_request_cache = NULL;
out_err:
	kmem_cache_destroy(rbd_img_request_cache);
	rbd_img_request_cache = NULL;

	return -ENOMEM;
}

static void rbd_slab_exit(void)
{
	rbd_assert(rbd_segment_name_cache);
	kmem_cache_destroy(rbd_segment_name_cache);
	rbd_segment_name_cache = NULL;

	rbd_assert(rbd_obj_request_cache);
	kmem_cache_destroy(rbd_obj_request_cache);
	rbd_obj_request_cache = NULL;

	rbd_assert(rbd_img_request_cache);
	kmem_cache_destroy(rbd_img_request_cache);
	rbd_img_request_cache = NULL;
}
static int __init rbd_init(void)
{
	int rc;

	if (!libceph_compatible(NULL)) {
		rbd_warn(NULL, "libceph incompatibility (quitting)");
		return -EINVAL;
	}

	rc = rbd_slab_init();
	if (rc)
		return rc;

	/*
	 * The number of active work items is limited by the number of
	 * rbd devices * queue depth, so leave @max_active at default.
	 */
	rbd_wq = alloc_workqueue(RBD_DRV_NAME, WQ_MEM_RECLAIM, 0);
	if (!rbd_wq) {
		rc = -ENOMEM;
		goto err_out_slab;
	}

	if (single_major) {
		rbd_major = register_blkdev(0, RBD_DRV_NAME);
		if (rbd_major < 0) {
			rc = rbd_major;
			goto err_out_wq;
		}
	}

	rc = rbd_sysfs_init();
	if (rc)
		goto err_out_blkdev;

	if (single_major)
		pr_info("loaded (major %d)\n", rbd_major);
	else
		pr_info("loaded\n");

	return 0;

err_out_blkdev:
	if (single_major)
		unregister_blkdev(rbd_major, RBD_DRV_NAME);
err_out_wq:
	destroy_workqueue(rbd_wq);
err_out_slab:
	rbd_slab_exit();
	return rc;
}
static void __exit rbd_exit(void)
{
	ida_destroy(&rbd_dev_id_ida);
	rbd_sysfs_cleanup();
	if (single_major)
		unregister_blkdev(rbd_major, RBD_DRV_NAME);
	destroy_workqueue(rbd_wq);
	rbd_slab_exit();
}

module_init(rbd_init);
module_exit(rbd_exit);

MODULE_AUTHOR("Alex Elder <elder@inktank.com>");
MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
/* following authorship retained from original osdblk.c */
MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");

MODULE_DESCRIPTION("RADOS Block Device (RBD) driver");
MODULE_LICENSE("GPL");