/*
   rbd.c -- Export ceph rados objects as a Linux block device

   based on drivers/block/osdblk.c:

   Copyright 2009 Red Hat, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

   For usage instructions, please refer to:

                 Documentation/ABI/testing/sysfs-bus-rbd

 */
#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/cls_lock_client.h>
#include <linux/ceph/decode.h>
#include <linux/parser.h>
#include <linux/bsearch.h>

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/blk-mq.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/workqueue.h>

#include "rbd_types.h"
#define RBD_DEBUG	/* Activate rbd_assert() calls */
/*
 * The basic unit of block I/O is a sector.  It is interpreted in a
 * number of contexts in Linux (blk, bio, genhd), but the default is
 * universally 512 bytes.  These symbols are just slightly more
 * meaningful than the bare numbers they represent.
 */
#define	SECTOR_SHIFT	9
#define	SECTOR_SIZE	(1ULL << SECTOR_SHIFT)
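
/*
 * Illustrative example (not in the original source): with SECTOR_SHIFT
 * of 9, SECTOR_SIZE is 512 bytes, so a default 4 MiB rbd object covers
 * (4 << 20) >> SECTOR_SHIFT == 8192 sectors.  Code elsewhere in this
 * driver sizes the gendisk by converting the mapping size to sectors
 * the same way.
 */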
/*
 * Increment the given counter and return its updated value.
 * If the counter is already 0 it will not be incremented.
 * If the counter is already at its maximum value returns
 * -EINVAL without updating it.
 */
static int atomic_inc_return_safe(atomic_t *v)
{
	unsigned int counter;

	counter = (unsigned int)__atomic_add_unless(v, 1, 0);
	if (counter <= (unsigned int)INT_MAX)
		return (int)counter;

	atomic_dec(v);

	return -EINVAL;
}

/* Decrement the counter.  Return the resulting value, or -EINVAL */
static int atomic_dec_return_safe(atomic_t *v)
{
	int counter;

	counter = atomic_dec_return(v);
	if (counter >= 0)
		return counter;

	atomic_inc(v);

	return -EINVAL;
}
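
/*
 * Usage sketch (illustrative only; mirrors how parent_ref is handled by
 * rbd_dev_parent_get()/rbd_dev_parent_put() later in this file):
 *
 *	if (atomic_inc_return_safe(&rbd_dev->parent_ref) > 0) {
 *		... safe to use the parent image ...
 *		atomic_dec_return_safe(&rbd_dev->parent_ref);
 *	}
 *
 * A zero counter means "torn down", so the increment refuses to
 * resurrect it; saturation is reported as -EINVAL instead of wrapping.
 */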
#define RBD_DRV_NAME "rbd"

#define RBD_MINORS_PER_MAJOR		256
#define RBD_SINGLE_MAJOR_PART_SHIFT	4

#define RBD_MAX_PARENT_CHAIN_LEN	16

#define RBD_SNAP_DEV_NAME_PREFIX	"snap_"
#define RBD_MAX_SNAP_NAME_LEN	\
			(NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))

#define RBD_MAX_SNAP_COUNT	510	/* allows max snapc to fit in 4KB */

#define RBD_SNAP_HEAD_NAME	"-"

#define	BAD_SNAP_INDEX	U32_MAX		/* invalid index into snap array */

/* This allows a single page to hold an image name sent by OSD */
#define RBD_IMAGE_NAME_LEN_MAX	(PAGE_SIZE - sizeof (__le32) - 1)
#define RBD_IMAGE_ID_LEN_MAX	64

#define RBD_OBJ_PREFIX_LEN_MAX	64

#define RBD_NOTIFY_TIMEOUT	5	/* seconds */
#define RBD_RETRY_DELAY		msecs_to_jiffies(1000)
#define RBD_FEATURE_LAYERING	(1<<0)
#define RBD_FEATURE_STRIPINGV2	(1<<1)
#define RBD_FEATURE_EXCLUSIVE_LOCK (1<<2)
#define RBD_FEATURES_ALL	(RBD_FEATURE_LAYERING |		\
				 RBD_FEATURE_STRIPINGV2 |	\
				 RBD_FEATURE_EXCLUSIVE_LOCK)

/* Features supported by this (client software) implementation. */

#define RBD_FEATURES_SUPPORTED	(RBD_FEATURES_ALL)
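
/*
 * Illustrative example (not in the original source): an image created
 * with layering and exclusive-lock enabled reports a feature mask of
 * RBD_FEATURE_LAYERING | RBD_FEATURE_EXCLUSIVE_LOCK == 0x5.  Probe
 * code elsewhere in the driver compares an image's mask against
 * RBD_FEATURES_SUPPORTED and refuses to map an image that uses any
 * feature bit this client does not implement.
 */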
/*
 * An RBD device name will be "rbd#", where the "rbd" comes from
 * RBD_DRV_NAME above, and # is a unique integer identifier.
 */
#define DEV_NAME_LEN		32
/*
 * block device image metadata (in-memory version)
 */
struct rbd_image_header {
	/* These six fields never change for a given rbd image */
	char *object_prefix;
	__u8 obj_order;
	__u8 crypt_type;
	__u8 comp_type;
	u64 stripe_unit;
	u64 stripe_count;
	u64 features;		/* Might be changeable someday? */

	/* The remaining fields need to be updated occasionally */
	u64 image_size;
	struct ceph_snap_context *snapc;
	char *snap_names;	/* format 1 only */
	u64 *snap_sizes;	/* format 1 only */
};
/*
 * An rbd image specification.
 *
 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
 * identify an image.  Each rbd_dev structure includes a pointer to
 * an rbd_spec structure that encapsulates this identity.
 *
 * Each of the id's in an rbd_spec has an associated name.  For a
 * user-mapped image, the names are supplied and the id's associated
 * with them are looked up.  For a layered image, a parent image is
 * defined by the tuple, and the names are looked up.
 *
 * An rbd_dev structure contains a parent_spec pointer which is
 * non-null if the image it represents is a child in a layered
 * image.  This pointer will refer to the rbd_spec structure used
 * by the parent rbd_dev for its own identity (i.e., the structure
 * is shared between the parent and child).
 *
 * Since these structures are populated once, during the discovery
 * phase of image construction, they are effectively immutable so
 * we make no effort to synchronize access to them.
 *
 * Note that code herein does not assume the image name is known (it
 * could be a null pointer).
 */
struct rbd_spec {
	u64		pool_id;
	const char	*pool_name;

	const char	*image_id;
	const char	*image_name;

	u64		snap_id;
	const char	*snap_name;

	struct kref	kref;
};
/*
 * an instance of the client.  multiple devices may share an rbd client.
 */
struct rbd_client {
	struct ceph_client	*client;
	struct kref		kref;
	struct list_head	node;
};
struct rbd_img_request;
typedef void (*rbd_img_callback_t)(struct rbd_img_request *);

#define	BAD_WHICH	U32_MAX		/* Good which or bad which, which? */

struct rbd_obj_request;
typedef void (*rbd_obj_callback_t)(struct rbd_obj_request *);

enum obj_request_type {
	OBJ_REQUEST_NODATA, OBJ_REQUEST_BIO, OBJ_REQUEST_PAGES
};

enum obj_operation_type {
	OBJ_OP_WRITE,
	OBJ_OP_READ,
	OBJ_OP_DISCARD,
};

enum obj_req_flags {
	OBJ_REQ_DONE,		/* completion flag: not done = 0, done = 1 */
	OBJ_REQ_IMG_DATA,	/* object usage: standalone = 0, image = 1 */
	OBJ_REQ_KNOWN,		/* EXISTS flag valid: no = 0, yes = 1 */
	OBJ_REQ_EXISTS,		/* target exists: no = 0, yes = 1 */
};
struct rbd_obj_request {
	const char		*object_name;
	u64			offset;		/* object start byte */
	u64			length;		/* bytes from offset */
	unsigned long		flags;

	/*
	 * An object request associated with an image will have its
	 * img_data flag set; a standalone object request will not.
	 *
	 * A standalone object request will have which == BAD_WHICH
	 * and a null obj_request pointer.
	 *
	 * An object request initiated in support of a layered image
	 * object (to check for its existence before a write) will
	 * have which == BAD_WHICH and a non-null obj_request pointer.
	 *
	 * Finally, an object request for rbd image data will have
	 * which != BAD_WHICH, and will have a non-null img_request
	 * pointer.  The value of which will be in the range
	 * 0..(img_request->obj_request_count-1).
	 */
	union {
		struct rbd_obj_request	*obj_request;	/* STAT op */
		struct {
			struct rbd_img_request	*img_request;
			u64			img_offset;
			/* links for img_request->obj_requests list */
			struct list_head	links;
		};
	};
	u32			which;		/* posn image request list */

	enum obj_request_type	type;
	union {
		struct bio	*bio_list;
		struct {
			struct page	**pages;
			u32		page_count;
		};
	};
	struct page		**copyup_pages;
	u32			copyup_page_count;

	struct ceph_osd_request	*osd_req;

	u64			xferred;	/* bytes transferred */
	int			result;

	rbd_obj_callback_t	callback;
	struct completion	completion;

	struct kref		kref;
};
enum img_req_flags {
	IMG_REQ_WRITE,		/* I/O direction: read = 0, write = 1 */
	IMG_REQ_CHILD,		/* initiator: block = 0, child image = 1 */
	IMG_REQ_LAYERED,	/* ENOENT handling: normal = 0, layered = 1 */
	IMG_REQ_DISCARD,	/* discard: normal = 0, discard request = 1 */
};
struct rbd_img_request {
	struct rbd_device	*rbd_dev;
	u64			offset;	/* starting image byte offset */
	u64			length;	/* byte count from offset */
	unsigned long		flags;
	union {
		u64			snap_id;	/* for reads */
		struct ceph_snap_context *snapc;	/* for writes */
	};
	union {
		struct request		*rq;		/* block request */
		struct rbd_obj_request	*obj_request;	/* obj req initiator */
	};
	struct page		**copyup_pages;
	u32			copyup_page_count;
	spinlock_t		completion_lock;/* protects next_completion */
	u32			next_completion;
	rbd_img_callback_t	callback;
	u64			xferred;/* aggregate bytes transferred */
	int			result;	/* first nonzero obj_request result */

	u32			obj_request_count;
	struct list_head	obj_requests;	/* rbd_obj_request structs */

	struct kref		kref;
};
#define for_each_obj_request(ireq, oreq) \
	list_for_each_entry(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_from(ireq, oreq) \
	list_for_each_entry_from(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_safe(ireq, oreq, n) \
	list_for_each_entry_safe_reverse(oreq, n, &(ireq)->obj_requests, links)
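
/*
 * Usage sketch (illustrative only):
 *
 *	struct rbd_obj_request *obj_req;
 *
 *	for_each_obj_request(img_req, obj_req)
 *		dout("obj_req %p which %u\n", obj_req, obj_req->which);
 *
 * Note that the _safe variant walks the list in reverse, which lets
 * teardown code drop each object request while iterating.
 */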
enum rbd_watch_state {
	RBD_WATCH_STATE_UNREGISTERED,
	RBD_WATCH_STATE_REGISTERED,
	RBD_WATCH_STATE_ERROR,
};

enum rbd_lock_state {
	RBD_LOCK_STATE_UNLOCKED,
	RBD_LOCK_STATE_LOCKED,
	RBD_LOCK_STATE_RELEASING,
};
/* WatchNotify::ClientId */
struct rbd_client_id {
	u64 gid;
	u64 handle;
};
struct rbd_mapping {
	u64                     size;
	u64                     features;
	bool			read_only;
};

/*
 * a single device
 */
struct rbd_device {
	int			dev_id;		/* blkdev unique id */

	int			major;		/* blkdev assigned major */
	int			minor;
	struct gendisk		*disk;		/* blkdev's gendisk and rq */

	u32			image_format;	/* Either 1 or 2 */
	struct rbd_client	*rbd_client;

	char			name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */

	spinlock_t		lock;		/* queue, flags, open_count */

	struct rbd_image_header	header;
	unsigned long		flags;		/* possibly lock protected */
	struct rbd_spec		*spec;
	struct rbd_options	*opts;
	char			*config_info;	/* add{,_single_major} string */

	struct ceph_object_id	header_oid;
	struct ceph_object_locator header_oloc;

	struct ceph_file_layout	layout;		/* used for all rbd requests */

	struct mutex		watch_mutex;
	enum rbd_watch_state	watch_state;
	struct ceph_osd_linger_request *watch_handle;
	u64			watch_cookie;
	struct delayed_work	watch_dwork;

	struct rw_semaphore	lock_rwsem;
	enum rbd_lock_state	lock_state;
	struct rbd_client_id	owner_cid;
	struct work_struct	acquired_lock_work;
	struct work_struct	released_lock_work;
	struct delayed_work	lock_dwork;
	struct work_struct	unlock_work;
	wait_queue_head_t	lock_waitq;

	struct workqueue_struct	*task_wq;

	struct rbd_spec		*parent_spec;
	u64			parent_overlap;
	atomic_t		parent_ref;
	struct rbd_device	*parent;

	/* Block layer tags. */
	struct blk_mq_tag_set	tag_set;

	/* protects updating the header */
	struct rw_semaphore     header_rwsem;

	struct rbd_mapping	mapping;

	struct list_head	node;

	/* sysfs related */
	struct device		dev;
	unsigned long		open_count;	/* protected by lock */
};
/*
 * Flag bits for rbd_dev->flags.  If atomicity is required,
 * rbd_dev->lock is used to protect access.
 *
 * Currently, only the "removing" flag (which is coupled with the
 * "open_count" field) requires atomic access.
 */
enum rbd_dev_flags {
	RBD_DEV_FLAG_EXISTS,	/* mapped snapshot has not been deleted */
	RBD_DEV_FLAG_REMOVING,	/* this mapping is being removed */
};
static DEFINE_MUTEX(client_mutex);	/* Serialize client creation */

static LIST_HEAD(rbd_dev_list);		/* devices */
static DEFINE_SPINLOCK(rbd_dev_list_lock);

static LIST_HEAD(rbd_client_list);	/* clients */
static DEFINE_SPINLOCK(rbd_client_list_lock);

/* Slab caches for frequently-allocated structures */

static struct kmem_cache	*rbd_img_request_cache;
static struct kmem_cache	*rbd_obj_request_cache;
static struct kmem_cache	*rbd_segment_name_cache;

static int rbd_major;
static DEFINE_IDA(rbd_dev_id_ida);

static struct workqueue_struct *rbd_wq;
/*
 * Default to false for now, as single-major requires >= 0.75 version of
 * userspace rbd utility.
 */
static bool single_major = false;
module_param(single_major, bool, S_IRUGO);
MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: false)");
static int rbd_img_request_submit(struct rbd_img_request *img_request);

static ssize_t rbd_add(struct bus_type *bus, const char *buf,
		       size_t count);
static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
			  size_t count);
static ssize_t rbd_add_single_major(struct bus_type *bus, const char *buf,
				    size_t count);
static ssize_t rbd_remove_single_major(struct bus_type *bus, const char *buf,
				       size_t count);
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth);
static void rbd_spec_put(struct rbd_spec *spec);
static int rbd_dev_id_to_minor(int dev_id)
{
	return dev_id << RBD_SINGLE_MAJOR_PART_SHIFT;
}

static int minor_to_rbd_dev_id(int minor)
{
	return minor >> RBD_SINGLE_MAJOR_PART_SHIFT;
}
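
/*
 * Worked example (illustrative only): with RBD_SINGLE_MAJOR_PART_SHIFT
 * of 4, dev_id 2 maps to minor 32 and owns minors 32..47, i.e. each
 * device in single-major mode reserves 1 << 4 == 16 minors for itself
 * and its partitions.
 */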
static bool rbd_is_lock_supported(struct rbd_device *rbd_dev)
{
	return (rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK) &&
	       rbd_dev->spec->snap_id == CEPH_NOSNAP &&
	       !rbd_dev->mapping.read_only;
}

static bool __rbd_is_lock_owner(struct rbd_device *rbd_dev)
{
	return rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED ||
	       rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING;
}

static bool rbd_is_lock_owner(struct rbd_device *rbd_dev)
{
	bool is_lock_owner;

	down_read(&rbd_dev->lock_rwsem);
	is_lock_owner = __rbd_is_lock_owner(rbd_dev);
	up_read(&rbd_dev->lock_rwsem);
	return is_lock_owner;
}
static BUS_ATTR(add, S_IWUSR, NULL, rbd_add);
static BUS_ATTR(remove, S_IWUSR, NULL, rbd_remove);
static BUS_ATTR(add_single_major, S_IWUSR, NULL, rbd_add_single_major);
static BUS_ATTR(remove_single_major, S_IWUSR, NULL, rbd_remove_single_major);

static struct attribute *rbd_bus_attrs[] = {
	&bus_attr_add.attr,
	&bus_attr_remove.attr,
	&bus_attr_add_single_major.attr,
	&bus_attr_remove_single_major.attr,
	NULL,
};

static umode_t rbd_bus_is_visible(struct kobject *kobj,
				  struct attribute *attr, int index)
{
	if (!single_major &&
	    (attr == &bus_attr_add_single_major.attr ||
	     attr == &bus_attr_remove_single_major.attr))
		return 0;

	return attr->mode;
}

static const struct attribute_group rbd_bus_group = {
	.attrs = rbd_bus_attrs,
	.is_visible = rbd_bus_is_visible,
};
__ATTRIBUTE_GROUPS(rbd_bus);

static struct bus_type rbd_bus_type = {
	.name		= "rbd",
	.bus_groups	= rbd_bus_groups,
};

static void rbd_root_dev_release(struct device *dev)
{
}

static struct device rbd_root_dev = {
	.init_name =    "rbd",
	.release =      rbd_root_dev_release,
};
static __printf(2, 3)
void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	if (!rbd_dev)
		printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
	else if (rbd_dev->disk)
		printk(KERN_WARNING "%s: %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_name)
		printk(KERN_WARNING "%s: image %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_id)
		printk(KERN_WARNING "%s: id %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
	else	/* punt */
		printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
			RBD_DRV_NAME, rbd_dev, &vaf);
	va_end(args);
}
#ifdef RBD_DEBUG
#define rbd_assert(expr)						\
		if (unlikely(!(expr))) {				\
			printk(KERN_ERR "\nAssertion failure in %s() "	\
						"at line %d:\n\n"	\
					"\trbd_assert(%s);\n\n",	\
					__func__, __LINE__, #expr);	\
			BUG();						\
		}
#else /* !RBD_DEBUG */
#  define rbd_assert(expr)	((void) 0)
#endif /* !RBD_DEBUG */
static void rbd_osd_copyup_callback(struct rbd_obj_request *obj_request);
static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);

static int rbd_dev_refresh(struct rbd_device *rbd_dev);
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
static int rbd_dev_header_info(struct rbd_device *rbd_dev);
static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev);
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id);
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
		u8 *order, u64 *snap_size);
static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
		u64 *snap_features);
static int rbd_open(struct block_device *bdev, fmode_t mode)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	bool removing = false;

	if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)
		return -EROFS;

	spin_lock_irq(&rbd_dev->lock);
	if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
		removing = true;
	else
		rbd_dev->open_count++;
	spin_unlock_irq(&rbd_dev->lock);
	if (removing)
		return -ENOENT;

	(void) get_device(&rbd_dev->dev);

	return 0;
}

static void rbd_release(struct gendisk *disk, fmode_t mode)
{
	struct rbd_device *rbd_dev = disk->private_data;
	unsigned long open_count_before;

	spin_lock_irq(&rbd_dev->lock);
	open_count_before = rbd_dev->open_count--;
	spin_unlock_irq(&rbd_dev->lock);
	rbd_assert(open_count_before > 0);

	put_device(&rbd_dev->dev);
}
static int rbd_ioctl_set_ro(struct rbd_device *rbd_dev, unsigned long arg)
{
	int ret = 0;
	int val;
	bool ro;
	bool ro_changed = false;

	/* get_user() may sleep, so call it before taking rbd_dev->lock */
	if (get_user(val, (int __user *)(arg)))
		return -EFAULT;

	ro = val ? true : false;
	/* Snapshots can't be written to */
	if (rbd_dev->spec->snap_id != CEPH_NOSNAP && !ro)
		return -EROFS;

	spin_lock_irq(&rbd_dev->lock);
	/* prevent others from opening this device */
	if (rbd_dev->open_count > 1) {
		ret = -EBUSY;
		goto out;
	}

	if (rbd_dev->mapping.read_only != ro) {
		rbd_dev->mapping.read_only = ro;
		ro_changed = true;
	}

out:
	spin_unlock_irq(&rbd_dev->lock);
	/* set_disk_ro() may sleep, so call it after releasing rbd_dev->lock */
	if (ret == 0 && ro_changed)
		set_disk_ro(rbd_dev->disk, ro ? 1 : 0);

	return ret;
}
static int rbd_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	int ret = 0;

	switch (cmd) {
	case BLKROSET:
		ret = rbd_ioctl_set_ro(rbd_dev, arg);
		break;
	default:
		ret = -ENOTTY;
	}

	return ret;
}

#ifdef CONFIG_COMPAT
static int rbd_compat_ioctl(struct block_device *bdev, fmode_t mode,
				unsigned int cmd, unsigned long arg)
{
	return rbd_ioctl(bdev, mode, cmd, arg);
}
#endif /* CONFIG_COMPAT */

static const struct block_device_operations rbd_bd_ops = {
	.owner			= THIS_MODULE,
	.open			= rbd_open,
	.release		= rbd_release,
	.ioctl			= rbd_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= rbd_compat_ioctl,
#endif
};
/*
 * Initialize an rbd client instance.  Success or not, this function
 * consumes ceph_opts.  Caller holds client_mutex.
 */
static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;
	int ret = -ENOMEM;

	dout("%s:\n", __func__);
	rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
	if (!rbdc)
		goto out_opt;

	kref_init(&rbdc->kref);
	INIT_LIST_HEAD(&rbdc->node);

	rbdc->client = ceph_create_client(ceph_opts, rbdc, 0, 0);
	if (IS_ERR(rbdc->client))
		goto out_rbdc;
	ceph_opts = NULL; /* Now rbdc->client is responsible for ceph_opts */

	ret = ceph_open_session(rbdc->client);
	if (ret < 0)
		goto out_client;

	spin_lock(&rbd_client_list_lock);
	list_add_tail(&rbdc->node, &rbd_client_list);
	spin_unlock(&rbd_client_list_lock);

	dout("%s: rbdc %p\n", __func__, rbdc);

	return rbdc;
out_client:
	ceph_destroy_client(rbdc->client);
out_rbdc:
	kfree(rbdc);
out_opt:
	if (ceph_opts)
		ceph_destroy_options(ceph_opts);
	dout("%s: error %d\n", __func__, ret);

	return ERR_PTR(ret);
}
static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
{
	kref_get(&rbdc->kref);

	return rbdc;
}

/*
 * Find a ceph client with specific addr and configuration.  If
 * found, bump its reference count.
 */
static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
{
	struct rbd_client *client_node;
	bool found = false;

	if (ceph_opts->flags & CEPH_OPT_NOSHARE)
		return NULL;

	spin_lock(&rbd_client_list_lock);
	list_for_each_entry(client_node, &rbd_client_list, node) {
		if (!ceph_compare_options(ceph_opts, client_node->client)) {
			__rbd_get_client(client_node);

			found = true;
			break;
		}
	}
	spin_unlock(&rbd_client_list_lock);

	return found ? client_node : NULL;
}
/*
 * (Per device) rbd map options
 */
enum {
	Opt_queue_depth,
	Opt_last_int,
	/* int args above */
	Opt_last_string,
	/* string args above */
	Opt_read_only,
	Opt_read_write,
	Opt_lock_on_read,
	Opt_err
};

static match_table_t rbd_opts_tokens = {
	{Opt_queue_depth, "queue_depth=%d"},
	/* int args above */
	/* string args above */
	{Opt_read_only, "read_only"},
	{Opt_read_only, "ro"},		/* Alternate spelling */
	{Opt_read_write, "read_write"},
	{Opt_read_write, "rw"},		/* Alternate spelling */
	{Opt_lock_on_read, "lock_on_read"},
	{Opt_err, NULL}
};

struct rbd_options {
	int	queue_depth;
	bool	read_only;
	bool	lock_on_read;
};

#define RBD_QUEUE_DEPTH_DEFAULT	BLKDEV_MAX_RQ
#define RBD_READ_ONLY_DEFAULT	false
#define RBD_LOCK_ON_READ_DEFAULT false
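
/*
 * Illustrative example (not in the original source): a map request
 * written to /sys/bus/rbd/add of roughly the form
 *
 *	1.2.3.4:6789 name=admin,queue_depth=256,lock_on_read mypool myimage -
 *
 * would have "queue_depth=256" and "lock_on_read" handed to
 * parse_rbd_opts_token() below, one token at a time, by libceph's
 * option parsing; tokens libceph recognizes (like name=admin) never
 * reach this driver.
 */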
static int parse_rbd_opts_token(char *c, void *private)
{
	struct rbd_options *rbd_opts = private;
	substring_t argstr[MAX_OPT_ARGS];
	int token, intval, ret;

	token = match_token(c, rbd_opts_tokens, argstr);
	if (token < Opt_last_int) {
		ret = match_int(&argstr[0], &intval);
		if (ret < 0) {
			pr_err("bad mount option arg (not int) at '%s'\n", c);
			return ret;
		}
		dout("got int token %d val %d\n", token, intval);
	} else if (token > Opt_last_int && token < Opt_last_string) {
		dout("got string token %d val %s\n", token, argstr[0].from);
	} else {
		dout("got token %d\n", token);
	}

	switch (token) {
	case Opt_queue_depth:
		if (intval < 1) {
			pr_err("queue_depth out of range\n");
			return -EINVAL;
		}
		rbd_opts->queue_depth = intval;
		break;
	case Opt_read_only:
		rbd_opts->read_only = true;
		break;
	case Opt_read_write:
		rbd_opts->read_only = false;
		break;
	case Opt_lock_on_read:
		rbd_opts->lock_on_read = true;
		break;
	default:
		/* libceph prints "bad option" msg */
		return -EINVAL;
	}

	return 0;
}

static char* obj_op_name(enum obj_operation_type op_type)
{
	switch (op_type) {
	case OBJ_OP_READ:
		return "read";
	case OBJ_OP_WRITE:
		return "write";
	case OBJ_OP_DISCARD:
		return "discard";
	default:
		return "???";
	}
}
/*
 * Get a ceph client with specific addr and configuration, if one does
 * not exist create it.  Either way, ceph_opts is consumed by this
 * function.
 */
static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;

	mutex_lock_nested(&client_mutex, SINGLE_DEPTH_NESTING);
	rbdc = rbd_client_find(ceph_opts);
	if (rbdc)	/* using an existing client */
		ceph_destroy_options(ceph_opts);
	else
		rbdc = rbd_client_create(ceph_opts);
	mutex_unlock(&client_mutex);

	return rbdc;
}
/*
 * Destroy ceph client.  Takes rbd_client_list_lock internally to
 * remove the client from the client list, so the caller must not
 * hold it.
 */
static void rbd_client_release(struct kref *kref)
{
	struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);

	dout("%s: rbdc %p\n", __func__, rbdc);
	spin_lock(&rbd_client_list_lock);
	list_del(&rbdc->node);
	spin_unlock(&rbd_client_list_lock);

	ceph_destroy_client(rbdc->client);
	kfree(rbdc);
}

/*
 * Drop reference to ceph client node. If it's not referenced anymore, release
 * it.
 */
static void rbd_put_client(struct rbd_client *rbdc)
{
	if (rbdc)
		kref_put(&rbdc->kref, rbd_client_release);
}

static bool rbd_image_format_valid(u32 image_format)
{
	return image_format == 1 || image_format == 2;
}
static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
{
	size_t size;
	u32 snap_count;

	/* The header has to start with the magic rbd header text */
	if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
		return false;

	/* The bio layer requires at least sector-sized I/O */

	if (ondisk->options.order < SECTOR_SHIFT)
		return false;

	/* If we use u64 in a few spots we may be able to loosen this */

	if (ondisk->options.order > 8 * sizeof (int) - 1)
		return false;

	/*
	 * The size of a snapshot header has to fit in a size_t, and
	 * that limits the number of snapshots.
	 */
	snap_count = le32_to_cpu(ondisk->snap_count);
	size = SIZE_MAX - sizeof (struct ceph_snap_context);
	if (snap_count > size / sizeof (__le64))
		return false;

	/*
	 * Not only that, but the size of the entire snapshot
	 * header must also be representable in a size_t.
	 */
	size -= snap_count * sizeof (__le64);
	if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
		return false;

	return true;
}
/*
 * Fill an rbd image header with information from the given format 1
 * on-disk header.
 */
static int rbd_header_from_disk(struct rbd_device *rbd_dev,
				 struct rbd_image_header_ondisk *ondisk)
{
	struct rbd_image_header *header = &rbd_dev->header;
	bool first_time = header->object_prefix == NULL;
	struct ceph_snap_context *snapc;
	char *object_prefix = NULL;
	char *snap_names = NULL;
	u64 *snap_sizes = NULL;
	u32 snap_count;
	int ret = -ENOMEM;
	u32 i;

	/* Allocate this now to avoid having to handle failure below */

	if (first_time) {
		size_t len;

		len = strnlen(ondisk->object_prefix,
				sizeof (ondisk->object_prefix));
		object_prefix = kmalloc(len + 1, GFP_KERNEL);
		if (!object_prefix)
			return -ENOMEM;
		memcpy(object_prefix, ondisk->object_prefix, len);
		object_prefix[len] = '\0';
	}

	/* Allocate the snapshot context and fill it in */

	snap_count = le32_to_cpu(ondisk->snap_count);
	snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
	if (!snapc)
		goto out_err;
	snapc->seq = le64_to_cpu(ondisk->snap_seq);
	if (snap_count) {
		struct rbd_image_snap_ondisk *snaps;
		u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);
		size_t size;

		/* We'll keep a copy of the snapshot names... */

		if (snap_names_len > (u64)SIZE_MAX)
			goto out_2big;
		snap_names = kmalloc(snap_names_len, GFP_KERNEL);
		if (!snap_names)
			goto out_err;

		/* ...as well as the array of their sizes. */

		size = snap_count * sizeof (*header->snap_sizes);
		snap_sizes = kmalloc(size, GFP_KERNEL);
		if (!snap_sizes)
			goto out_err;

		/*
		 * Copy the names, and fill in each snapshot's id
		 * and size.
		 *
		 * Note that rbd_dev_v1_header_info() guarantees the
		 * ondisk buffer we're working with has
		 * snap_names_len bytes beyond the end of the
		 * snapshot id array, this memcpy() is safe.
		 */
		memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
		snaps = ondisk->snaps;
		for (i = 0; i < snap_count; i++) {
			snapc->snaps[i] = le64_to_cpu(snaps[i].id);
			snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
		}
	}

	/* We won't fail any more, fill in the header */

	if (first_time) {
		header->object_prefix = object_prefix;
		header->obj_order = ondisk->options.order;
		header->crypt_type = ondisk->options.crypt_type;
		header->comp_type = ondisk->options.comp_type;
		/* The rest aren't used for format 1 images */
		header->stripe_unit = 0;
		header->stripe_count = 0;
		header->features = 0;
	} else {
		ceph_put_snap_context(header->snapc);
		kfree(header->snap_names);
		kfree(header->snap_sizes);
	}

	/* The remaining fields always get updated (when we refresh) */

	header->image_size = le64_to_cpu(ondisk->image_size);
	header->snapc = snapc;
	header->snap_names = snap_names;
	header->snap_sizes = snap_sizes;

	return 0;
out_2big:
	ret = -EIO;
out_err:
	kfree(snap_sizes);
	kfree(snap_names);
	ceph_put_snap_context(snapc);
	kfree(object_prefix);

	return ret;
}
static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
{
	const char *snap_name;

	rbd_assert(which < rbd_dev->header.snapc->num_snaps);

	/* Skip over names until we find the one we are looking for */

	snap_name = rbd_dev->header.snap_names;
	while (which--)
		snap_name += strlen(snap_name) + 1;

	return kstrdup(snap_name, GFP_KERNEL);
}

/*
 * Snapshot id comparison function for use with qsort()/bsearch().
 * Note that result is for snapshots in *descending* order.
 */
static int snapid_compare_reverse(const void *s1, const void *s2)
{
	u64 snap_id1 = *(u64 *)s1;
	u64 snap_id2 = *(u64 *)s2;

	if (snap_id1 < snap_id2)
		return 1;
	return snap_id1 == snap_id2 ? 0 : -1;
}
/*
 * Search a snapshot context to see if the given snapshot id is
 * present.
 *
 * Returns the position of the snapshot id in the array if it's found,
 * or BAD_SNAP_INDEX otherwise.
 *
 * Note: The snapshot array is kept sorted (by the osd) in
 * reverse order, highest snapshot id first.
 */
static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	u64 *found;

	found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
				sizeof (snap_id), snapid_compare_reverse);

	return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
}
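
/*
 * Worked example (illustrative only): with snapc->snaps holding
 * { 12, 7, 3 } (descending, as the osd keeps it), looking up snap id 7
 * returns index 1, while looking up snap id 5 returns BAD_SNAP_INDEX.
 */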
static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id)
{
	u32 which;
	const char *snap_name;

	which = rbd_dev_snap_index(rbd_dev, snap_id);
	if (which == BAD_SNAP_INDEX)
		return ERR_PTR(-ENOENT);

	snap_name = _rbd_dev_v1_snap_name(rbd_dev, which);
	return snap_name ? snap_name : ERR_PTR(-ENOMEM);
}
static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
{
	if (snap_id == CEPH_NOSNAP)
		return RBD_SNAP_HEAD_NAME;

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (rbd_dev->image_format == 1)
		return rbd_dev_v1_snap_name(rbd_dev, snap_id);

	return rbd_dev_v2_snap_name(rbd_dev, snap_id);
}

static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u64 *snap_size)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_size = rbd_dev->header.image_size;
	} else if (rbd_dev->image_format == 1) {
		u32 which;

		which = rbd_dev_snap_index(rbd_dev, snap_id);
		if (which == BAD_SNAP_INDEX)
			return -ENOENT;

		*snap_size = rbd_dev->header.snap_sizes[which];
	} else {
		u64 size = 0;
		int ret;

		ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
		if (ret)
			return ret;

		*snap_size = size;
	}
	return 0;
}

static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
			u64 *snap_features)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_features = rbd_dev->header.features;
	} else if (rbd_dev->image_format == 1) {
		*snap_features = 0;	/* No features for format 1 */
	} else {
		u64 features = 0;
		int ret;

		ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, &features);
		if (ret)
			return ret;

		*snap_features = features;
	}
	return 0;
}
static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
{
	u64 snap_id = rbd_dev->spec->snap_id;
	u64 size = 0;
	u64 features = 0;
	int ret;

	ret = rbd_snap_size(rbd_dev, snap_id, &size);
	if (ret)
		return ret;
	ret = rbd_snap_features(rbd_dev, snap_id, &features);
	if (ret)
		return ret;

	rbd_dev->mapping.size = size;
	rbd_dev->mapping.features = features;

	return 0;
}

static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
{
	rbd_dev->mapping.size = 0;
	rbd_dev->mapping.features = 0;
}

static void rbd_segment_name_free(const char *name)
{
	/* The explicit cast here is needed to drop the const qualifier */

	kmem_cache_free(rbd_segment_name_cache, (void *)name);
}

static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
{
	char *name;
	u64 segment;
	int ret;
	char *name_format;

	name = kmem_cache_alloc(rbd_segment_name_cache, GFP_NOIO);
	if (!name)
		return NULL;
	segment = offset >> rbd_dev->header.obj_order;
	name_format = "%s.%012llx";
	if (rbd_dev->image_format == 2)
		name_format = "%s.%016llx";
	ret = snprintf(name, CEPH_MAX_OID_NAME_LEN + 1, name_format,
			rbd_dev->header.object_prefix, segment);
	if (ret < 0 || ret > CEPH_MAX_OID_NAME_LEN) {
		pr_err("error formatting segment name for #%llu (%d)\n",
			segment, ret);
		rbd_segment_name_free(name);
		name = NULL;
	}

	return name;
}
static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset)
{
	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

	return offset & (segment_size - 1);
}

static u64 rbd_segment_length(struct rbd_device *rbd_dev,
				u64 offset, u64 length)
{
	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

	offset &= segment_size - 1;

	rbd_assert(length <= U64_MAX - offset);
	if (offset + length > segment_size)
		length = segment_size - offset;

	return length;
}
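
/*
 * Worked example (illustrative only): with the default obj_order of 22
 * (4 MiB objects), an image request at offset 6 MiB for 4 MiB starts in
 * segment 1 (6 MiB >> 22), at rbd_segment_offset() == 2 MiB within it,
 * and rbd_segment_length() clamps the first object request to the
 * 2 MiB remaining in that segment; the rest goes to segment 2.
 */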
/*
 * returns the size of an object in the image
 */
static u64 rbd_obj_bytes(struct rbd_image_header *header)
{
	return (u64) 1 << header->obj_order;
}

/*
 * bio helpers
 */

static void bio_chain_put(struct bio *chain)
{
	struct bio *tmp;

	while (chain) {
		tmp = chain;
		chain = chain->bi_next;
		bio_put(tmp);
	}
}
/*
 * zeros a bio chain, starting at specific offset
 */
static void zero_bio_chain(struct bio *chain, int start_ofs)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	unsigned long flags;
	void *buf;
	int pos = 0;

	while (chain) {
		bio_for_each_segment(bv, chain, iter) {
			if (pos + bv.bv_len > start_ofs) {
				int remainder = max(start_ofs - pos, 0);
				buf = bvec_kmap_irq(&bv, &flags);
				memset(buf + remainder, 0,
				       bv.bv_len - remainder);
				flush_dcache_page(bv.bv_page);
				bvec_kunmap_irq(buf, &flags);
			}
			pos += bv.bv_len;
		}

		chain = chain->bi_next;
	}
}

/*
 * similar to zero_bio_chain(), zeros data defined by a page array,
 * starting at the given byte offset from the start of the array and
 * continuing up to the given end offset.  The pages array is
 * assumed to be big enough to hold all bytes up to the end.
 */
static void zero_pages(struct page **pages, u64 offset, u64 end)
{
	struct page **page = &pages[offset >> PAGE_SHIFT];

	rbd_assert(end > offset);
	rbd_assert(end - offset <= (u64)SIZE_MAX);
	while (offset < end) {
		size_t page_offset;
		size_t length;
		unsigned long flags;
		void *kaddr;

		page_offset = offset & ~PAGE_MASK;
		length = min_t(size_t, PAGE_SIZE - page_offset, end - offset);
		local_irq_save(flags);
		kaddr = kmap_atomic(*page);
		memset(kaddr + page_offset, 0, length);
		flush_dcache_page(*page);
		kunmap_atomic(kaddr);
		local_irq_restore(flags);

		offset += length;
		page++;
	}
}
/*
 * Clone a portion of a bio, starting at the given byte offset
 * and continuing for the number of bytes indicated.
 */
static struct bio *bio_clone_range(struct bio *bio_src,
					unsigned int offset,
					unsigned int len,
					gfp_t gfpmask)
{
	struct bio *bio;

	bio = bio_clone(bio_src, gfpmask);
	if (!bio)
		return NULL;	/* ENOMEM */

	bio_advance(bio, offset);
	bio->bi_iter.bi_size = len;

	return bio;
}

/*
 * Clone a portion of a bio chain, starting at the given byte offset
 * into the first bio in the source chain and continuing for the
 * number of bytes indicated.  The result is another bio chain of
 * exactly the given length, or a null pointer on error.
 *
 * The bio_src and offset parameters are both in-out.  On entry they
 * refer to the first source bio and the offset into that bio where
 * the start of data to be cloned is located.
 *
 * On return, bio_src is updated to refer to the bio in the source
 * chain that contains the first un-cloned byte, and *offset will
 * contain the offset of that byte within that bio.
 */
static struct bio *bio_chain_clone_range(struct bio **bio_src,
					unsigned int *offset,
					unsigned int len,
					gfp_t gfpmask)
{
	struct bio *bi = *bio_src;
	unsigned int off = *offset;
	struct bio *chain = NULL;
	struct bio **end;

	/* Build up a chain of clone bios up to the limit */

	if (!bi || off >= bi->bi_iter.bi_size || !len)
		return NULL;		/* Nothing to clone */

	end = &chain;
	while (len) {
		unsigned int bi_size;
		struct bio *bio;

		if (!bi) {
			rbd_warn(NULL, "bio_chain exhausted with %u left", len);
			goto out_err;	/* EINVAL; ran out of bio's */
		}
		bi_size = min_t(unsigned int, bi->bi_iter.bi_size - off, len);
		bio = bio_clone_range(bi, off, bi_size, gfpmask);
		if (!bio)
			goto out_err;	/* ENOMEM */

		*end = bio;
		end = &bio->bi_next;

		off += bi_size;
		if (off == bi->bi_iter.bi_size) {
			bi = bi->bi_next;
			off = 0;
		}
		len -= bi_size;
	}
	*bio_src = bi;
	*offset = off;

	return chain;
out_err:
	bio_chain_put(chain);

	return NULL;
}
/*
 * The default/initial value for all object request flags is 0.  For
 * each flag, once its value is set to 1 it is never reset to 0
 * again.
 */
static void obj_request_img_data_set(struct rbd_obj_request *obj_request)
{
	if (test_and_set_bit(OBJ_REQ_IMG_DATA, &obj_request->flags)) {
		struct rbd_device *rbd_dev;

		rbd_dev = obj_request->img_request->rbd_dev;
		rbd_warn(rbd_dev, "obj_request %p already marked img_data",
			obj_request);
	}
}

static bool obj_request_img_data_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_IMG_DATA, &obj_request->flags) != 0;
}

static void obj_request_done_set(struct rbd_obj_request *obj_request)
{
	if (test_and_set_bit(OBJ_REQ_DONE, &obj_request->flags)) {
		struct rbd_device *rbd_dev = NULL;

		if (obj_request_img_data_test(obj_request))
			rbd_dev = obj_request->img_request->rbd_dev;
		rbd_warn(rbd_dev, "obj_request %p already marked done",
			obj_request);
	}
}

static bool obj_request_done_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_DONE, &obj_request->flags) != 0;
}

/*
 * This sets the KNOWN flag after (possibly) setting the EXISTS
 * flag.  The latter is set based on the "exists" value provided.
 *
 * Note that for our purposes once an object exists it never goes
 * away again.  It's possible that the responses from two existence
 * checks are separated by the creation of the target object, and
 * the first ("doesn't exist") response arrives *after* the second
 * ("does exist").  In that case we ignore the second one.
 */
static void obj_request_existence_set(struct rbd_obj_request *obj_request,
				bool exists)
{
	if (exists)
		set_bit(OBJ_REQ_EXISTS, &obj_request->flags);
	set_bit(OBJ_REQ_KNOWN, &obj_request->flags);
	smp_mb();
}

static bool obj_request_known_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_KNOWN, &obj_request->flags) != 0;
}

static bool obj_request_exists_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_EXISTS, &obj_request->flags) != 0;
}

static bool obj_request_overlaps_parent(struct rbd_obj_request *obj_request)
{
	struct rbd_device *rbd_dev = obj_request->img_request->rbd_dev;

	return obj_request->img_offset <
	    round_up(rbd_dev->parent_overlap, rbd_obj_bytes(&rbd_dev->header));
}
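
/*
 * Note (explanatory, not from the original source): the overlap is
 * rounded up to a whole object so that an object which merely straddles
 * the parent overlap boundary still counts as overlapping; its leading
 * bytes may need to be read from (or copied up from) the parent.
 */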
static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		atomic_read(&obj_request->kref.refcount));
	kref_get(&obj_request->kref);
}

static void rbd_obj_request_destroy(struct kref *kref);
static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request != NULL);
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		atomic_read(&obj_request->kref.refcount));
	kref_put(&obj_request->kref, rbd_obj_request_destroy);
}

static void rbd_img_request_get(struct rbd_img_request *img_request)
{
	dout("%s: img %p (was %d)\n", __func__, img_request,
	     atomic_read(&img_request->kref.refcount));
	kref_get(&img_request->kref);
}

static bool img_request_child_test(struct rbd_img_request *img_request);
static void rbd_parent_request_destroy(struct kref *kref);
static void rbd_img_request_destroy(struct kref *kref);
static void rbd_img_request_put(struct rbd_img_request *img_request)
{
	rbd_assert(img_request != NULL);
	dout("%s: img %p (was %d)\n", __func__, img_request,
		atomic_read(&img_request->kref.refcount));
	if (img_request_child_test(img_request))
		kref_put(&img_request->kref, rbd_parent_request_destroy);
	else
		kref_put(&img_request->kref, rbd_img_request_destroy);
}
static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->img_request == NULL);

	/* Image request now owns object's original reference */
	obj_request->img_request = img_request;
	obj_request->which = img_request->obj_request_count;
	rbd_assert(!obj_request_img_data_test(obj_request));
	obj_request_img_data_set(obj_request);
	rbd_assert(obj_request->which != BAD_WHICH);
	img_request->obj_request_count++;
	list_add_tail(&obj_request->links, &img_request->obj_requests);
	dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
		obj_request->which);
}

static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->which != BAD_WHICH);

	dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
		obj_request->which);
	list_del(&obj_request->links);
	rbd_assert(img_request->obj_request_count > 0);
	img_request->obj_request_count--;
	rbd_assert(obj_request->which == img_request->obj_request_count);
	obj_request->which = BAD_WHICH;
	rbd_assert(obj_request_img_data_test(obj_request));
	rbd_assert(obj_request->img_request == img_request);
	obj_request->img_request = NULL;
	obj_request->callback = NULL;
	rbd_obj_request_put(obj_request);
}

static bool obj_request_type_valid(enum obj_request_type type)
{
	switch (type) {
	case OBJ_REQUEST_NODATA:
	case OBJ_REQUEST_BIO:
	case OBJ_REQUEST_PAGES:
		return true;
	default:
		return false;
	}
}
static int rbd_obj_request_submit(struct ceph_osd_client *osdc,
				struct rbd_obj_request *obj_request)
{
	dout("%s %p\n", __func__, obj_request);
	return ceph_osdc_start_request(osdc, obj_request->osd_req, false);
}

static void rbd_obj_request_end(struct rbd_obj_request *obj_request)
{
	dout("%s %p\n", __func__, obj_request);
	ceph_osdc_cancel_request(obj_request->osd_req);
}

/*
 * Wait for an object request to complete.  If interrupted, cancel the
 * underlying osd request.
 *
 * @timeout: in jiffies, 0 means "wait forever"
 */
static int __rbd_obj_request_wait(struct rbd_obj_request *obj_request,
				  unsigned long timeout)
{
	long ret;

	dout("%s %p\n", __func__, obj_request);
	ret = wait_for_completion_interruptible_timeout(
					&obj_request->completion,
					ceph_timeout_jiffies(timeout));
	if (ret <= 0) {
		if (ret == 0)
			ret = -ETIMEDOUT;
		rbd_obj_request_end(obj_request);
	} else {
		ret = 0;
	}

	dout("%s %p ret %d\n", __func__, obj_request, (int)ret);
	return ret;
}

static int rbd_obj_request_wait(struct rbd_obj_request *obj_request)
{
	return __rbd_obj_request_wait(obj_request, 0);
}
static void rbd_img_request_complete(struct rbd_img_request *img_request)
{

	dout("%s: img %p\n", __func__, img_request);

	/*
	 * If no error occurred, compute the aggregate transfer
	 * count for the image request.  We could instead use
	 * atomic64_cmpxchg() to update it as each object request
	 * completes; not clear which way is better off hand.
	 */
	if (!img_request->result) {
		struct rbd_obj_request *obj_request;
		u64 xferred = 0;

		for_each_obj_request(img_request, obj_request)
			xferred += obj_request->xferred;
		img_request->xferred = xferred;
	}

	if (img_request->callback)
		img_request->callback(img_request);
	else
		rbd_img_request_put(img_request);
}
/*
 * The default/initial value for all image request flags is 0.  Each
 * is conditionally set to 1 at image request initialization time
 * and currently never changes thereafter.
 */
static void img_request_write_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_WRITE, &img_request->flags);
	smp_mb();
}

static bool img_request_write_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_WRITE, &img_request->flags) != 0;
}

/*
 * Set the discard flag when the img_request is a discard request
 */
static void img_request_discard_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_DISCARD, &img_request->flags);
	smp_mb();
}

static bool img_request_discard_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_DISCARD, &img_request->flags) != 0;
}

static void img_request_child_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_CHILD, &img_request->flags);
	smp_mb();
}

static void img_request_child_clear(struct rbd_img_request *img_request)
{
	clear_bit(IMG_REQ_CHILD, &img_request->flags);
	smp_mb();
}

static bool img_request_child_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_CHILD, &img_request->flags) != 0;
}

static void img_request_layered_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

static void img_request_layered_clear(struct rbd_img_request *img_request)
{
	clear_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

static bool img_request_layered_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
}

static enum obj_operation_type
rbd_img_request_op_type(struct rbd_img_request *img_request)
{
	if (img_request_write_test(img_request))
		return OBJ_OP_WRITE;
	else if (img_request_discard_test(img_request))
		return OBJ_OP_DISCARD;
	else
		return OBJ_OP_READ;
}
static void
rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
{
	u64 xferred = obj_request->xferred;
	u64 length = obj_request->length;

	dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
		obj_request, obj_request->img_request, obj_request->result,
		xferred, length);
	/*
	 * ENOENT means a hole in the image.  We zero-fill the entire
	 * length of the request.  A short read also implies zero-fill
	 * to the end of the request.  An error requires the whole
	 * length of the request to be reported finished with an error
	 * to the block layer.  In each case we update the xferred
	 * count to indicate the whole request was satisfied.
	 */
	rbd_assert(obj_request->type != OBJ_REQUEST_NODATA);
	if (obj_request->result == -ENOENT) {
		if (obj_request->type == OBJ_REQUEST_BIO)
			zero_bio_chain(obj_request->bio_list, 0);
		else
			zero_pages(obj_request->pages, 0, length);
		obj_request->result = 0;
	} else if (xferred < length && !obj_request->result) {
		if (obj_request->type == OBJ_REQUEST_BIO)
			zero_bio_chain(obj_request->bio_list, xferred);
		else
			zero_pages(obj_request->pages, xferred, length);
	}
	obj_request->xferred = length;
	obj_request_done_set(obj_request);
}

static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p cb %p\n", __func__, obj_request,
		obj_request->callback);
	if (obj_request->callback)
		obj_request->callback(obj_request);
	else
		complete_all(&obj_request->completion);
}
static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = NULL;
	struct rbd_device *rbd_dev = NULL;
	bool layered = false;

	if (obj_request_img_data_test(obj_request)) {
		img_request = obj_request->img_request;
		layered = img_request && img_request_layered_test(img_request);
		rbd_dev = img_request->rbd_dev;
	}

	dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
		obj_request, img_request, obj_request->result,
		obj_request->xferred, obj_request->length);
	if (layered && obj_request->result == -ENOENT &&
			obj_request->img_offset < rbd_dev->parent_overlap)
		rbd_img_parent_read(obj_request);
	else if (img_request)
		rbd_img_obj_request_read_callback(obj_request);
	else
		obj_request_done_set(obj_request);
}

static void rbd_osd_write_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p result %d %llu\n", __func__, obj_request,
		obj_request->result, obj_request->length);
	/*
	 * There is no such thing as a successful short write.  Set
	 * it to our originally-requested length.
	 */
	obj_request->xferred = obj_request->length;
	obj_request_done_set(obj_request);
}

static void rbd_osd_discard_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p result %d %llu\n", __func__, obj_request,
		obj_request->result, obj_request->length);
	/*
	 * There is no such thing as a successful short discard.  Set
	 * it to our originally-requested length.
	 */
	obj_request->xferred = obj_request->length;
	/* discarding a non-existent object is not a problem */
	if (obj_request->result == -ENOENT)
		obj_request->result = 0;
	obj_request_done_set(obj_request);
}

/*
 * For a simple stat call there's nothing to do.  We'll do more if
 * this is part of a write sequence for a layered image.
 */
static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);
	obj_request_done_set(obj_request);
}

static void rbd_osd_call_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);

	if (obj_request_img_data_test(obj_request))
		rbd_osd_copyup_callback(obj_request);
	else
		obj_request_done_set(obj_request);
}
static void rbd_osd_req_callback(struct ceph_osd_request *osd_req)
{
	struct rbd_obj_request *obj_request = osd_req->r_priv;
	u16 opcode;

	dout("%s: osd_req %p\n", __func__, osd_req);
	rbd_assert(osd_req == obj_request->osd_req);
	if (obj_request_img_data_test(obj_request)) {
		rbd_assert(obj_request->img_request);
		rbd_assert(obj_request->which != BAD_WHICH);
	} else {
		rbd_assert(obj_request->which == BAD_WHICH);
	}

	if (osd_req->r_result < 0)
		obj_request->result = osd_req->r_result;

	/*
	 * We support a 64-bit length, but ultimately it has to be
	 * passed to the block layer, which just supports a 32-bit
	 * length field.
	 */
	obj_request->xferred = osd_req->r_ops[0].outdata_len;
	rbd_assert(obj_request->xferred < (u64)UINT_MAX);

	opcode = osd_req->r_ops[0].op;
	switch (opcode) {
	case CEPH_OSD_OP_READ:
		rbd_osd_read_callback(obj_request);
		break;
	case CEPH_OSD_OP_SETALLOCHINT:
		rbd_assert(osd_req->r_ops[1].op == CEPH_OSD_OP_WRITE ||
			   osd_req->r_ops[1].op == CEPH_OSD_OP_WRITEFULL);
		/* fall through */
	case CEPH_OSD_OP_WRITE:
	case CEPH_OSD_OP_WRITEFULL:
		rbd_osd_write_callback(obj_request);
		break;
	case CEPH_OSD_OP_STAT:
		rbd_osd_stat_callback(obj_request);
		break;
	case CEPH_OSD_OP_DELETE:
	case CEPH_OSD_OP_TRUNCATE:
	case CEPH_OSD_OP_ZERO:
		rbd_osd_discard_callback(obj_request);
		break;
	case CEPH_OSD_OP_CALL:
		rbd_osd_call_callback(obj_request);
		break;
	default:
		rbd_warn(NULL, "%s: unsupported op %hu",
			obj_request->object_name, (unsigned short) opcode);
		break;
	}

	if (obj_request_done_test(obj_request))
		rbd_obj_request_complete(obj_request);
}
static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = obj_request->img_request;
	struct ceph_osd_request *osd_req = obj_request->osd_req;

	if (img_request)
		osd_req->r_snapid = img_request->snap_id;
}

static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
{
	struct ceph_osd_request *osd_req = obj_request->osd_req;

	osd_req->r_mtime = CURRENT_TIME;
	osd_req->r_data_offset = obj_request->offset;
}
/*
 * Create an osd request.  A read request has one osd op (read).
 * A write request has either one (watch) or two (hint+write) osd ops.
 * (All rbd data writes are prefixed with an allocation hint op, but
 * technically osd watch is a write request, hence this distinction.)
 */
static struct ceph_osd_request *rbd_osd_req_create(
					struct rbd_device *rbd_dev,
					enum obj_operation_type op_type,
					unsigned int num_ops,
					struct rbd_obj_request *obj_request)
{
	struct ceph_snap_context *snapc = NULL;
	struct ceph_osd_client *osdc;
	struct ceph_osd_request *osd_req;

	if (obj_request_img_data_test(obj_request) &&
		(op_type == OBJ_OP_DISCARD || op_type == OBJ_OP_WRITE)) {
		struct rbd_img_request *img_request = obj_request->img_request;
		if (op_type == OBJ_OP_WRITE) {
			rbd_assert(img_request_write_test(img_request));
		} else {
			rbd_assert(img_request_discard_test(img_request));
		}
		snapc = img_request->snapc;
	}

	rbd_assert(num_ops == 1 || ((op_type == OBJ_OP_WRITE) && num_ops == 2));

	/* Allocate and initialize the request, for the num_ops ops */

	osdc = &rbd_dev->rbd_client->client->osdc;
	osd_req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false,
					  GFP_NOIO);
	if (!osd_req)
		goto fail;

	if (op_type == OBJ_OP_WRITE || op_type == OBJ_OP_DISCARD)
		osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
	else
		osd_req->r_flags = CEPH_OSD_FLAG_READ;

	osd_req->r_callback = rbd_osd_req_callback;
	osd_req->r_priv = obj_request;

	osd_req->r_base_oloc.pool = rbd_dev->layout.pool_id;
	if (ceph_oid_aprintf(&osd_req->r_base_oid, GFP_NOIO, "%s",
			     obj_request->object_name))
		goto fail;

	if (ceph_osdc_alloc_messages(osd_req, GFP_NOIO))
		goto fail;

	return osd_req;

fail:
	ceph_osdc_put_request(osd_req);
	return NULL;
}
/*
 * Create a copyup osd request based on the information in the object
 * request supplied.  A copyup request has two or three osd ops, a
 * copyup method call, potentially a hint op, and a write or truncate
 * or zero op.
 */
static struct ceph_osd_request *
rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	struct ceph_snap_context *snapc;
	struct rbd_device *rbd_dev;
	struct ceph_osd_client *osdc;
	struct ceph_osd_request *osd_req;
	int num_osd_ops = 3;

	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;
	rbd_assert(img_request);
	rbd_assert(img_request_write_test(img_request) ||
			img_request_discard_test(img_request));

	if (img_request_discard_test(img_request))
		num_osd_ops = 2;

	/* Allocate and initialize the request, for all the ops */

	snapc = img_request->snapc;
	rbd_dev = img_request->rbd_dev;
	osdc = &rbd_dev->rbd_client->client->osdc;
	osd_req = ceph_osdc_alloc_request(osdc, snapc, num_osd_ops,
						false, GFP_NOIO);
	if (!osd_req)
		goto fail;

	osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
	osd_req->r_callback = rbd_osd_req_callback;
	osd_req->r_priv = obj_request;

	osd_req->r_base_oloc.pool = rbd_dev->layout.pool_id;
	if (ceph_oid_aprintf(&osd_req->r_base_oid, GFP_NOIO, "%s",
			     obj_request->object_name))
		goto fail;

	if (ceph_osdc_alloc_messages(osd_req, GFP_NOIO))
		goto fail;

	return osd_req;

fail:
	ceph_osdc_put_request(osd_req);
	return NULL;
}

static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
{
	ceph_osdc_put_request(osd_req);
}
/* object_name is assumed to be a non-null pointer and NUL-terminated */

static struct rbd_obj_request *rbd_obj_request_create(const char *object_name,
						u64 offset, u64 length,
						enum obj_request_type type)
{
	struct rbd_obj_request *obj_request;
	size_t size;
	char *name;

	rbd_assert(obj_request_type_valid(type));

	size = strlen(object_name) + 1;
	name = kmalloc(size, GFP_NOIO);
	if (!name)
		return NULL;

	obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_NOIO);
	if (!obj_request) {
		kfree(name);
		return NULL;
	}

	obj_request->object_name = memcpy(name, object_name, size);
	obj_request->offset = offset;
	obj_request->length = length;
	obj_request->flags = 0;
	obj_request->which = BAD_WHICH;
	obj_request->type = type;
	INIT_LIST_HEAD(&obj_request->links);
	init_completion(&obj_request->completion);
	kref_init(&obj_request->kref);

	dout("%s: \"%s\" %llu/%llu %d -> obj %p\n", __func__, object_name,
		offset, length, (int)type, obj_request);

	return obj_request;
}
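
/*
 * Design note (explanatory, not from the original source): allocations
 * made while servicing block I/O, like the ones above, use GFP_NOIO so
 * that memory reclaim triggered by the allocation cannot recurse into
 * the block layer and deadlock against the very I/O being issued.
 */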
static void rbd_obj_request_destroy(struct kref *kref)
{
	struct rbd_obj_request *obj_request;

	obj_request = container_of(kref, struct rbd_obj_request, kref);

	dout("%s: obj %p\n", __func__, obj_request);

	rbd_assert(obj_request->img_request == NULL);
	rbd_assert(obj_request->which == BAD_WHICH);

	if (obj_request->osd_req)
		rbd_osd_req_destroy(obj_request->osd_req);

	rbd_assert(obj_request_type_valid(obj_request->type));
	switch (obj_request->type) {
	case OBJ_REQUEST_NODATA:
		break;		/* Nothing to do */
	case OBJ_REQUEST_BIO:
		if (obj_request->bio_list)
			bio_chain_put(obj_request->bio_list);
		break;
	case OBJ_REQUEST_PAGES:
		if (obj_request->pages)
			ceph_release_page_vector(obj_request->pages,
						obj_request->page_count);
		break;
	}

	kfree(obj_request->object_name);
	obj_request->object_name = NULL;
	kmem_cache_free(rbd_obj_request_cache, obj_request);
}
/* It's OK to call this for a device with no parent */

static void rbd_spec_put(struct rbd_spec *spec);
static void rbd_dev_unparent(struct rbd_device *rbd_dev)
{
	rbd_dev_remove_parent(rbd_dev);
	rbd_spec_put(rbd_dev->parent_spec);
	rbd_dev->parent_spec = NULL;
	rbd_dev->parent_overlap = 0;
}

/*
 * Parent image reference counting is used to determine when an
 * image's parent fields can be safely torn down--after there are no
 * more in-flight requests to the parent image.  When the last
 * reference is dropped, cleaning them up is safe.
 */
static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
{
	int counter;

	if (!rbd_dev->parent_spec)
		return;

	counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
	if (counter > 0)
		return;

	/* Last reference; clean up parent data structures */

	if (!counter)
		rbd_dev_unparent(rbd_dev);
	else
		rbd_warn(rbd_dev, "parent reference underflow");
}

/*
 * If an image has a non-zero parent overlap, get a reference to its
 * parent.
 *
 * Returns true if the rbd device has a parent with a non-zero
 * overlap and a reference for it was successfully taken, or
 * false otherwise.
 */
static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
{
	int counter = 0;

	if (!rbd_dev->parent_spec)
		return false;

	down_read(&rbd_dev->header_rwsem);
	if (rbd_dev->parent_overlap)
		counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
	up_read(&rbd_dev->header_rwsem);

	if (counter < 0)
		rbd_warn(rbd_dev, "parent reference overflow");

	return counter > 0;
}
/*
 * Caller is responsible for filling in the list of object requests
 * that comprises the image request, and the Linux request pointer
 * (if there is one).
 */
static struct rbd_img_request *rbd_img_request_create(
					struct rbd_device *rbd_dev,
					u64 offset, u64 length,
					enum obj_operation_type op_type,
					struct ceph_snap_context *snapc)
{
	struct rbd_img_request *img_request;

	img_request = kmem_cache_alloc(rbd_img_request_cache, GFP_NOIO);
	if (!img_request)
		return NULL;

	img_request->rq = NULL;
	img_request->rbd_dev = rbd_dev;
	img_request->offset = offset;
	img_request->length = length;
	img_request->flags = 0;
	if (op_type == OBJ_OP_DISCARD) {
		img_request_discard_set(img_request);
		img_request->snapc = snapc;
	} else if (op_type == OBJ_OP_WRITE) {
		img_request_write_set(img_request);
		img_request->snapc = snapc;
	} else {
		img_request->snap_id = rbd_dev->spec->snap_id;
	}
	if (rbd_dev_parent_get(rbd_dev))
		img_request_layered_set(img_request);
	spin_lock_init(&img_request->completion_lock);
	img_request->next_completion = 0;
	img_request->callback = NULL;
	img_request->result = 0;
	img_request->obj_request_count = 0;
	INIT_LIST_HEAD(&img_request->obj_requests);
	kref_init(&img_request->kref);

	dout("%s: rbd_dev %p %s %llu/%llu -> img %p\n", __func__, rbd_dev,
		obj_op_name(op_type), offset, length, img_request);

	return img_request;
}

static void rbd_img_request_destroy(struct kref *kref)
{
	struct rbd_img_request *img_request;
	struct rbd_obj_request *obj_request;
	struct rbd_obj_request *next_obj_request;

	img_request = container_of(kref, struct rbd_img_request, kref);

	dout("%s: img %p\n", __func__, img_request);

	for_each_obj_request_safe(img_request, obj_request, next_obj_request)
		rbd_img_obj_request_del(img_request, obj_request);
	rbd_assert(img_request->obj_request_count == 0);

	if (img_request_layered_test(img_request)) {
		img_request_layered_clear(img_request);
		rbd_dev_parent_put(img_request->rbd_dev);
	}

	if (img_request_write_test(img_request) ||
		img_request_discard_test(img_request))
		ceph_put_snap_context(img_request->snapc);

	kmem_cache_free(rbd_img_request_cache, img_request);
}
2290 static struct rbd_img_request *rbd_parent_request_create(
2291 struct rbd_obj_request *obj_request,
2292 u64 img_offset, u64 length)
2294 struct rbd_img_request *parent_request;
2295 struct rbd_device *rbd_dev;
2297 rbd_assert(obj_request->img_request);
2298 rbd_dev = obj_request->img_request->rbd_dev;
2300 parent_request = rbd_img_request_create(rbd_dev->parent, img_offset,
2301 length, OBJ_OP_READ, NULL);
2302 if (!parent_request)
2303 	return NULL;
2305 img_request_child_set(parent_request);
2306 rbd_obj_request_get(obj_request);
2307 parent_request->obj_request = obj_request;
2309 return parent_request;
2312 static void rbd_parent_request_destroy(struct kref *kref)
2314 struct rbd_img_request *parent_request;
2315 struct rbd_obj_request *orig_request;
2317 parent_request = container_of(kref, struct rbd_img_request, kref);
2318 orig_request = parent_request->obj_request;
2320 parent_request->obj_request = NULL;
2321 rbd_obj_request_put(orig_request);
2322 img_request_child_clear(parent_request);
2324 rbd_img_request_destroy(kref);
2327 static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
2329 struct rbd_img_request *img_request;
2330 unsigned int xferred;
2331 int result;
2332 bool more;
2334 rbd_assert(obj_request_img_data_test(obj_request));
2335 img_request = obj_request->img_request;
2337 rbd_assert(obj_request->xferred <= (u64)UINT_MAX);
2338 xferred = (unsigned int)obj_request->xferred;
2339 result = obj_request->result;
2340 if (result) {
2341 	struct rbd_device *rbd_dev = img_request->rbd_dev;
2342 enum obj_operation_type op_type;
2344 if (img_request_discard_test(img_request))
2345 op_type = OBJ_OP_DISCARD;
2346 else if (img_request_write_test(img_request))
2347 op_type = OBJ_OP_WRITE;
2349 op_type = OBJ_OP_READ;
2351 rbd_warn(rbd_dev, "%s %llx at %llx (%llx)",
2352 obj_op_name(op_type), obj_request->length,
2353 obj_request->img_offset, obj_request->offset);
2354 rbd_warn(rbd_dev, "  result %d xferred %x",
2355 	result, xferred);
2356 if (!img_request->result)
2357 img_request->result = result;
2359 * Need to end I/O on the entire obj_request worth of
2360 * bytes in case of error.
2362 xferred = obj_request->length;
2363 }
2365 /* Image object requests don't own their page array */
2367 if (obj_request->type == OBJ_REQUEST_PAGES) {
2368 obj_request->pages = NULL;
2369 obj_request->page_count = 0;
2372 if (img_request_child_test(img_request)) {
2373 rbd_assert(img_request->obj_request != NULL);
2374 more = obj_request->which < img_request->obj_request_count - 1;
2376 rbd_assert(img_request->rq != NULL);
2378 more = blk_update_request(img_request->rq, result, xferred);
2379 if (!more)
2380 	__blk_mq_end_request(img_request->rq, result);
2386 static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
2388 struct rbd_img_request *img_request;
2389 u32 which = obj_request->which;
2392 rbd_assert(obj_request_img_data_test(obj_request));
2393 img_request = obj_request->img_request;
2395 dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
2396 rbd_assert(img_request != NULL);
2397 rbd_assert(img_request->obj_request_count > 0);
2398 rbd_assert(which != BAD_WHICH);
2399 rbd_assert(which < img_request->obj_request_count);
2401 spin_lock_irq(&img_request->completion_lock);
2402 if (which != img_request->next_completion)
2403 	goto out;
2405 for_each_obj_request_from(img_request, obj_request) {
2407 rbd_assert(which < img_request->obj_request_count);
2409 if (!obj_request_done_test(obj_request))
2410 	break;
2411 more = rbd_img_obj_end_request(obj_request);
2412 which++;
2413 }
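/*
 * Invariant checked below: "more" is false exactly when the last
 * object request in the image request has completed.
 */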
2415 rbd_assert(more ^ (which == img_request->obj_request_count));
2416 img_request->next_completion = which;
2417 out:
2418 spin_unlock_irq(&img_request->completion_lock);
2419 rbd_img_request_put(img_request);
2421 if (!more)
2422 	rbd_img_request_complete(img_request);
2426 * Add individual osd ops to the given ceph_osd_request and prepare
2427 * them for submission. num_ops is the current number of
2428 * osd operations already added to the osd request.
2430 static void rbd_img_obj_request_fill(struct rbd_obj_request *obj_request,
2431 struct ceph_osd_request *osd_request,
2432 enum obj_operation_type op_type,
2433 unsigned int num_ops)
2435 struct rbd_img_request *img_request = obj_request->img_request;
2436 struct rbd_device *rbd_dev = img_request->rbd_dev;
2437 u64 object_size = rbd_obj_bytes(&rbd_dev->header);
2438 u64 offset = obj_request->offset;
2439 u64 length = obj_request->length;
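/*
 * Opcode selection for the ops added below: a discard becomes
 * DELETE when it covers a whole object with no parent data to
 * preserve, TRUNCATE when it runs to the end of the object or of
 * the image, and ZERO otherwise; a write covering a whole object
 * becomes WRITEFULL; everything else is a plain extent READ/WRITE.
 */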
2443 if (op_type == OBJ_OP_DISCARD) {
2444 if (!offset && length == object_size &&
2445 (!img_request_layered_test(img_request) ||
2446 !obj_request_overlaps_parent(obj_request))) {
2447 opcode = CEPH_OSD_OP_DELETE;
2448 } else if (offset + length == object_size) {
2449 opcode = CEPH_OSD_OP_TRUNCATE;
2451 down_read(&rbd_dev->header_rwsem);
2452 img_end = rbd_dev->header.image_size;
2453 up_read(&rbd_dev->header_rwsem);
2455 if (obj_request->img_offset + length == img_end)
2456 opcode = CEPH_OSD_OP_TRUNCATE;
2458 opcode = CEPH_OSD_OP_ZERO;
2460 } else if (op_type == OBJ_OP_WRITE) {
2461 if (!offset && length == object_size)
2462 opcode = CEPH_OSD_OP_WRITEFULL;
2464 opcode = CEPH_OSD_OP_WRITE;
2465 osd_req_op_alloc_hint_init(osd_request, num_ops,
2466 object_size, object_size);
2469 opcode = CEPH_OSD_OP_READ;
2472 if (opcode == CEPH_OSD_OP_DELETE)
2473 osd_req_op_init(osd_request, num_ops, opcode, 0);
2475 osd_req_op_extent_init(osd_request, num_ops, opcode,
2476 offset, length, 0, 0);
2478 if (obj_request->type == OBJ_REQUEST_BIO)
2479 osd_req_op_extent_osd_data_bio(osd_request, num_ops,
2480 obj_request->bio_list, length);
2481 else if (obj_request->type == OBJ_REQUEST_PAGES)
2482 osd_req_op_extent_osd_data_pages(osd_request, num_ops,
2483 obj_request->pages, length,
2484 offset & ~PAGE_MASK, false, false);
2486 /* Discards are also writes */
2487 if (op_type == OBJ_OP_WRITE || op_type == OBJ_OP_DISCARD)
2488 rbd_osd_req_format_write(obj_request);
2490 rbd_osd_req_format_read(obj_request);
2494 * Split up an image request into one or more object requests, each
2495 * to a different object. The "type" parameter indicates whether
2496 * "data_desc" is the pointer to the head of a list of bio
2497 * structures, or the base of a page array. In either case this
2498 * function assumes data_desc describes memory sufficient to hold
2499 * all data described by the image request.
2501 static int rbd_img_request_fill(struct rbd_img_request *img_request,
2502 enum obj_request_type type,
2503 void *data_desc)
2505 struct rbd_device *rbd_dev = img_request->rbd_dev;
2506 struct rbd_obj_request *obj_request = NULL;
2507 struct rbd_obj_request *next_obj_request;
2508 struct bio *bio_list = NULL;
2509 unsigned int bio_offset = 0;
2510 struct page **pages = NULL;
2511 enum obj_operation_type op_type;
2515 dout("%s: img %p type %d data_desc %p\n", __func__, img_request,
2516 (int)type, data_desc);
2518 img_offset = img_request->offset;
2519 resid = img_request->length;
2520 rbd_assert(resid > 0);
2521 op_type = rbd_img_request_op_type(img_request);
2523 if (type == OBJ_REQUEST_BIO) {
2524 bio_list = data_desc;
2525 rbd_assert(img_offset ==
2526 bio_list->bi_iter.bi_sector << SECTOR_SHIFT);
2527 } else if (type == OBJ_REQUEST_PAGES) {
2528 	pages = data_desc;
2529 }
2531 while (resid) {
2532 struct ceph_osd_request *osd_req;
2533 const char *object_name;
2537 object_name = rbd_segment_name(rbd_dev, img_offset);
2538 if (!object_name)
2539 	goto out_unwind;
2540 offset = rbd_segment_offset(rbd_dev, img_offset);
2541 length = rbd_segment_length(rbd_dev, img_offset, resid);
2542 obj_request = rbd_obj_request_create(object_name,
2543 offset, length, type);
2544 /* object request has its own copy of the object name */
2545 rbd_segment_name_free(object_name);
2546 if (!obj_request)
2547 	goto out_unwind;
2550 * set obj_request->img_request before creating the
2551 * osd_request so that it gets the right snapc
2552 */
2553 rbd_img_obj_request_add(img_request, obj_request);
2555 if (type == OBJ_REQUEST_BIO) {
2556 unsigned int clone_size;
2558 rbd_assert(length <= (u64)UINT_MAX);
2559 clone_size = (unsigned int)length;
2560 obj_request->bio_list =
2561 bio_chain_clone_range(&bio_list,
2562 	&bio_offset,
2563 	clone_size,
2564 	GFP_NOIO);
2565 if (!obj_request->bio_list)
2566 	goto out_unwind;
2567 } else if (type == OBJ_REQUEST_PAGES) {
2568 unsigned int page_count;
2570 obj_request->pages = pages;
2571 page_count = (u32)calc_pages_for(offset, length);
2572 obj_request->page_count = page_count;
2573 if ((offset + length) & ~PAGE_MASK)
2574 page_count--; /* more on last page */
2575 pages += page_count;
2578 osd_req = rbd_osd_req_create(rbd_dev, op_type,
2579 (op_type == OBJ_OP_WRITE) ? 2 : 1,
2580 	obj_request);
2581 if (!osd_req)
2582 	goto out_unwind;
2584 obj_request->osd_req = osd_req;
2585 obj_request->callback = rbd_img_obj_callback;
2586 obj_request->img_offset = img_offset;
2588 rbd_img_obj_request_fill(obj_request, osd_req, op_type, 0);
2590 rbd_img_request_get(img_request);
2592 img_offset += length;
2593 resid -= length;
2594 }
2596 return 0;
2598 out_unwind:
2599 for_each_obj_request_safe(img_request, obj_request, next_obj_request)
2600 rbd_img_obj_request_del(img_request, obj_request);
2602 return -ENOMEM;
2605 static void
2606 rbd_osd_copyup_callback(struct rbd_obj_request *obj_request)
2608 struct rbd_img_request *img_request;
2609 struct rbd_device *rbd_dev;
2610 struct page **pages;
2613 dout("%s: obj %p\n", __func__, obj_request);
2615 rbd_assert(obj_request->type == OBJ_REQUEST_BIO ||
2616 obj_request->type == OBJ_REQUEST_NODATA);
2617 rbd_assert(obj_request_img_data_test(obj_request));
2618 img_request = obj_request->img_request;
2619 rbd_assert(img_request);
2621 rbd_dev = img_request->rbd_dev;
2622 rbd_assert(rbd_dev);
2624 pages = obj_request->copyup_pages;
2625 rbd_assert(pages != NULL);
2626 obj_request->copyup_pages = NULL;
2627 page_count = obj_request->copyup_page_count;
2628 rbd_assert(page_count);
2629 obj_request->copyup_page_count = 0;
2630 ceph_release_page_vector(pages, page_count);
2633 * We want the transfer count to reflect the size of the
2634 * original write request. There is no such thing as a
2635 * successful short write, so if the request was successful
2636 * we can just set it to the originally-requested length.
2637 */
2638 if (!obj_request->result)
2639 obj_request->xferred = obj_request->length;
2641 obj_request_done_set(obj_request);
2644 static void
2645 rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
2647 struct rbd_obj_request *orig_request;
2648 struct ceph_osd_request *osd_req;
2649 struct ceph_osd_client *osdc;
2650 struct rbd_device *rbd_dev;
2651 struct page **pages;
2652 enum obj_operation_type op_type;
2657 rbd_assert(img_request_child_test(img_request));
2659 /* First get what we need from the image request */
2661 pages = img_request->copyup_pages;
2662 rbd_assert(pages != NULL);
2663 img_request->copyup_pages = NULL;
2664 page_count = img_request->copyup_page_count;
2665 rbd_assert(page_count);
2666 img_request->copyup_page_count = 0;
2668 orig_request = img_request->obj_request;
2669 rbd_assert(orig_request != NULL);
2670 rbd_assert(obj_request_type_valid(orig_request->type));
2671 img_result = img_request->result;
2672 parent_length = img_request->length;
2673 rbd_assert(parent_length == img_request->xferred);
2674 rbd_img_request_put(img_request);
2676 rbd_assert(orig_request->img_request);
2677 rbd_dev = orig_request->img_request->rbd_dev;
2678 rbd_assert(rbd_dev);
2681 * If the overlap has become 0 (most likely because the
2682 * image has been flattened) we need to free the pages
2683 * and re-submit the original write request.
2685 if (!rbd_dev->parent_overlap) {
2686 struct ceph_osd_client *osdc;
2688 ceph_release_page_vector(pages, page_count);
2689 osdc = &rbd_dev->rbd_client->client->osdc;
2690 img_result = rbd_obj_request_submit(osdc, orig_request);
2691 if (!img_result)
2692 	return;
2693 }
2695 if (img_result)
2696 	goto out_err;
2699 * The original osd request is of no use to us any more.
2700 * We need a new one that can hold the three ops in a copyup
2701 * request. Allocate the new copyup osd request for the
2702 * original request, and release the old one.
2704 img_result = -ENOMEM;
2705 osd_req = rbd_osd_req_create_copyup(orig_request);
2708 rbd_osd_req_destroy(orig_request->osd_req);
2709 orig_request->osd_req = osd_req;
2710 orig_request->copyup_pages = pages;
2711 orig_request->copyup_page_count = page_count;
2713 /* Initialize the copyup op */
2715 osd_req_op_cls_init(osd_req, 0, CEPH_OSD_OP_CALL, "rbd", "copyup");
2716 osd_req_op_cls_request_data_pages(osd_req, 0, pages, parent_length, 0,
2717 	false, false);
2719 /* Add the other op(s) */
2721 op_type = rbd_img_request_op_type(orig_request->img_request);
2722 rbd_img_obj_request_fill(orig_request, osd_req, op_type, 1);
2724 /* All set, send it off. */
2726 osdc = &rbd_dev->rbd_client->client->osdc;
2727 img_result = rbd_obj_request_submit(osdc, orig_request);
2728 if (!img_result)
2729 	return;
2730 out_err:
2731 /* Record the error code and complete the request */
2733 orig_request->result = img_result;
2734 orig_request->xferred = 0;
2735 obj_request_done_set(orig_request);
2736 rbd_obj_request_complete(orig_request);
2740 * Read from the parent image the range of data that covers the
2741 * entire target of the given object request. This is used for
2742 * satisfying a layered image write request when the target of an
2743 * object request from the image request does not exist.
2745 * A page array big enough to hold the returned data is allocated
2746 * and supplied to rbd_img_request_fill() as the "data descriptor."
2747 * When the read completes, this page array will be transferred to
2748 * the original object request for the copyup operation.
2750 * If an error occurs, record it as the result of the original
2751 * object request and mark it done so it gets completed.
2753 static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
2755 struct rbd_img_request *img_request = NULL;
2756 struct rbd_img_request *parent_request = NULL;
2757 struct rbd_device *rbd_dev;
2758 u64 img_offset;
2759 u64 length;
2760 struct page **pages = NULL;
2761 u32 page_count;
2762 int result;
2764 rbd_assert(obj_request_img_data_test(obj_request));
2765 rbd_assert(obj_request_type_valid(obj_request->type));
2767 img_request = obj_request->img_request;
2768 rbd_assert(img_request != NULL);
2769 rbd_dev = img_request->rbd_dev;
2770 rbd_assert(rbd_dev->parent != NULL);
2773 * Determine the byte range covered by the object in the
2774 * child image to which the original request was to be sent.
2776 img_offset = obj_request->img_offset - obj_request->offset;
2777 length = (u64)1 << rbd_dev->header.obj_order;
2780 * There is no defined parent data beyond the parent
2781 * overlap, so limit what we read at that boundary if
2782 * necessary.
2783 */
2784 if (img_offset + length > rbd_dev->parent_overlap) {
2785 rbd_assert(img_offset < rbd_dev->parent_overlap);
2786 length = rbd_dev->parent_overlap - img_offset;
2790 * Allocate a page array big enough to receive the data read
2791 * from the parent.
2792 */
2793 page_count = (u32)calc_pages_for(0, length);
2794 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2795 if (IS_ERR(pages)) {
2796 result = PTR_ERR(pages);
2797 pages = NULL;
2798 goto out_err;
2799 }
2801 result = -ENOMEM;
2802 parent_request = rbd_parent_request_create(obj_request,
2803 img_offset, length);
2804 if (!parent_request)
2805 	goto out_err;
2807 result = rbd_img_request_fill(parent_request, OBJ_REQUEST_PAGES, pages);
2808 if (result)
2809 	goto out_err;
2810 parent_request->copyup_pages = pages;
2811 parent_request->copyup_page_count = page_count;
2813 parent_request->callback = rbd_img_obj_parent_read_full_callback;
2814 result = rbd_img_request_submit(parent_request);
2815 if (!result)
2816 	return 0;
2818 parent_request->copyup_pages = NULL;
2819 parent_request->copyup_page_count = 0;
2820 parent_request->obj_request = NULL;
2821 rbd_obj_request_put(obj_request);
2822 out_err:
2823 if (pages)
2824 	ceph_release_page_vector(pages, page_count);
2825 if (parent_request)
2826 	rbd_img_request_put(parent_request);
2827 obj_request->result = result;
2828 obj_request->xferred = 0;
2829 obj_request_done_set(obj_request);
2831 return result;
2834 static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request)
2836 struct rbd_obj_request *orig_request;
2837 struct rbd_device *rbd_dev;
2840 rbd_assert(!obj_request_img_data_test(obj_request));
2843 * All we need from the object request is the original
2844 * request and the result of the STAT op. Grab those, then
2845 * we're done with the request.
2847 orig_request = obj_request->obj_request;
2848 obj_request->obj_request = NULL;
2849 rbd_obj_request_put(orig_request);
2850 rbd_assert(orig_request);
2851 rbd_assert(orig_request->img_request);
2853 result = obj_request->result;
2854 obj_request->result = 0;
2856 dout("%s: obj %p for obj %p result %d %llu/%llu\n", __func__,
2857 obj_request, orig_request, result,
2858 obj_request->xferred, obj_request->length);
2859 rbd_obj_request_put(obj_request);
2862 * If the overlap has become 0 (most likely because the
2863 * image has been flattened) we need to free the pages
2864 * and re-submit the original write request.
2866 rbd_dev = orig_request->img_request->rbd_dev;
2867 if (!rbd_dev->parent_overlap) {
2868 struct ceph_osd_client *osdc;
2870 osdc = &rbd_dev->rbd_client->client->osdc;
2871 result = rbd_obj_request_submit(osdc, orig_request);
2877 * Our only purpose here is to determine whether the object
2878 * exists, and we don't want to treat the non-existence as
2879 * an error. If something else comes back, transfer the
2880 * error to the original request and complete it now.
2881 */
2882 if (!result) {
2883 	obj_request_existence_set(orig_request, true);
2884 } else if (result == -ENOENT) {
2885 obj_request_existence_set(orig_request, false);
2886 } else if (result) {
2887 orig_request->result = result;
2892 * Resubmit the original request now that we have recorded
2893 * whether the target object exists.
2895 orig_request->result = rbd_img_obj_request_submit(orig_request);
2897 if (orig_request->result)
2898 rbd_obj_request_complete(orig_request);
2901 static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request)
2903 struct rbd_obj_request *stat_request;
2904 struct rbd_device *rbd_dev;
2905 struct ceph_osd_client *osdc;
2906 struct page **pages = NULL;
2912 * The response data for a STAT call consists of:
2913 *     le64 length;
2914 *     struct {
2915 *         le32 tv_sec;
2916 *         le32 tv_nsec;
2917 *     } mtime;
2918 */
2919 size = sizeof (__le64) + sizeof (__le32) + sizeof (__le32);
2920 page_count = (u32)calc_pages_for(0, size);
2921 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2922 if (IS_ERR(pages))
2923 	return PTR_ERR(pages);
2926 stat_request = rbd_obj_request_create(obj_request->object_name, 0, 0,
2927 	OBJ_REQUEST_PAGES);
2928 if (!stat_request)
2929 	goto out;
2931 rbd_obj_request_get(obj_request);
2932 stat_request->obj_request = obj_request;
2933 stat_request->pages = pages;
2934 stat_request->page_count = page_count;
2936 rbd_assert(obj_request->img_request);
2937 rbd_dev = obj_request->img_request->rbd_dev;
2938 stat_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
2939 	stat_request);
2940 if (!stat_request->osd_req)
2941 	goto out;
2942 stat_request->callback = rbd_img_obj_exists_callback;
2944 osd_req_op_init(stat_request->osd_req, 0, CEPH_OSD_OP_STAT, 0);
2945 osd_req_op_raw_data_in_pages(stat_request->osd_req, 0, pages, size, 0,
2946 	false, false);
2947 rbd_osd_req_format_read(stat_request);
2949 osdc = &rbd_dev->rbd_client->client->osdc;
2950 ret = rbd_obj_request_submit(osdc, stat_request);
2951 out:
2952 if (ret)
2953 	rbd_obj_request_put(obj_request);
2955 return ret;
2958 static bool img_obj_request_simple(struct rbd_obj_request *obj_request)
2960 struct rbd_img_request *img_request;
2961 struct rbd_device *rbd_dev;
2963 rbd_assert(obj_request_img_data_test(obj_request));
2965 img_request = obj_request->img_request;
2966 rbd_assert(img_request);
2967 rbd_dev = img_request->rbd_dev;
2970 if (!img_request_write_test(img_request) &&
2971 !img_request_discard_test(img_request))
2972 	return true;
2974 /* Non-layered writes */
2975 if (!img_request_layered_test(img_request))
2976 	return true;
2979 * Layered writes outside of the parent overlap range don't
2980 * share any data with the parent.
2982 if (!obj_request_overlaps_parent(obj_request))
2983 	return true;
2986 * Entire-object layered writes - we will overwrite whatever
2987 * parent data there is anyway.
2989 if (!obj_request->offset &&
2990 obj_request->length == rbd_obj_bytes(&rbd_dev->header))
2991 	return true;
2994 * If the object is known to already exist, its parent data has
2995 * already been copied.
2997 if (obj_request_known_test(obj_request) &&
2998 obj_request_exists_test(obj_request))
2999 	return true;
3001 return false;
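/*
 * Submission strategy (see below): "simple" requests go straight to
 * the OSD; a layered write to an object that is known not to exist
 * first reads the covering range from the parent for a copyup; if
 * existence is unknown, a STAT is issued first.
 */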
3004 static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request)
3006 if (img_obj_request_simple(obj_request)) {
3007 struct rbd_device *rbd_dev;
3008 struct ceph_osd_client *osdc;
3010 rbd_dev = obj_request->img_request->rbd_dev;
3011 osdc = &rbd_dev->rbd_client->client->osdc;
3013 return rbd_obj_request_submit(osdc, obj_request);
3017 * It's a layered write. The target object might exist but
3018 * we may not know that yet. If we know it doesn't exist,
3019 * start by reading the data for the full target object from
3020 * the parent so we can use it for a copyup to the target.
3022 if (obj_request_known_test(obj_request))
3023 return rbd_img_obj_parent_read_full(obj_request);
3025 /* We don't know whether the target exists. Go find out. */
3027 return rbd_img_obj_exists_submit(obj_request);
3030 static int rbd_img_request_submit(struct rbd_img_request *img_request)
3032 struct rbd_obj_request *obj_request;
3033 struct rbd_obj_request *next_obj_request;
3036 dout("%s: img %p\n", __func__, img_request);
3038 rbd_img_request_get(img_request);
3039 for_each_obj_request_safe(img_request, obj_request, next_obj_request) {
3040 ret = rbd_img_obj_request_submit(obj_request);
3046 rbd_img_request_put(img_request);
3050 static void rbd_img_parent_read_callback(struct rbd_img_request *img_request)
3052 struct rbd_obj_request *obj_request;
3053 struct rbd_device *rbd_dev;
3058 rbd_assert(img_request_child_test(img_request));
3060 /* First get what we need from the image request and release it */
3062 obj_request = img_request->obj_request;
3063 img_xferred = img_request->xferred;
3064 img_result = img_request->result;
3065 rbd_img_request_put(img_request);
3068 * If the overlap has become 0 (most likely because the
3069 * image has been flattened) we need to re-submit the
3070 * original request.
3071 */
3072 rbd_assert(obj_request);
3073 rbd_assert(obj_request->img_request);
3074 rbd_dev = obj_request->img_request->rbd_dev;
3075 if (!rbd_dev->parent_overlap) {
3076 struct ceph_osd_client *osdc;
3078 osdc = &rbd_dev->rbd_client->client->osdc;
3079 img_result = rbd_obj_request_submit(osdc, obj_request);
3084 obj_request->result = img_result;
3085 if (obj_request->result)
3086 	goto out;
3089 * We need to zero anything beyond the parent overlap
3090 * boundary. Since rbd_img_obj_request_read_callback()
3091 * will zero anything beyond the end of a short read, an
3092 * easy way to do this is to pretend the data from the
3093 * parent came up short--ending at the overlap boundary.
3095 rbd_assert(obj_request->img_offset < U64_MAX - obj_request->length);
3096 obj_end = obj_request->img_offset + obj_request->length;
3097 if (obj_end > rbd_dev->parent_overlap) {
3100 if (obj_request->img_offset < rbd_dev->parent_overlap)
3101 xferred = rbd_dev->parent_overlap -
3102 obj_request->img_offset;
3104 obj_request->xferred = min(img_xferred, xferred);
3105 } else {
3106 	obj_request->xferred = img_xferred;
3107 }
3108 out:
3109 rbd_img_obj_request_read_callback(obj_request);
3110 rbd_obj_request_complete(obj_request);
3113 static void rbd_img_parent_read(struct rbd_obj_request *obj_request)
3115 struct rbd_img_request *img_request;
3118 rbd_assert(obj_request_img_data_test(obj_request));
3119 rbd_assert(obj_request->img_request != NULL);
3120 rbd_assert(obj_request->result == (s32) -ENOENT);
3121 rbd_assert(obj_request_type_valid(obj_request->type));
3123 /* rbd_read_finish(obj_request, obj_request->length); */
3124 img_request = rbd_parent_request_create(obj_request,
3125 obj_request->img_offset,
3126 obj_request->length);
3131 if (obj_request->type == OBJ_REQUEST_BIO)
3132 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
3133 obj_request->bio_list);
3135 result = rbd_img_request_fill(img_request, OBJ_REQUEST_PAGES,
3136 obj_request->pages);
3140 img_request->callback = rbd_img_parent_read_callback;
3141 result = rbd_img_request_submit(img_request);
3148 rbd_img_request_put(img_request);
3149 obj_request->result = result;
3150 obj_request->xferred = 0;
3151 obj_request_done_set(obj_request);
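/*
 * Exclusive lock handling: the code below implements the client side
 * of RBD_FEATURE_EXCLUSIVE_LOCK, combining the cls_lock OSD class
 * (lock ownership) with watch/notify (coordination between clients).
 */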
3154 static const struct rbd_client_id rbd_empty_cid;
3156 static bool rbd_cid_equal(const struct rbd_client_id *lhs,
3157 const struct rbd_client_id *rhs)
3159 return lhs->gid == rhs->gid && lhs->handle == rhs->handle;
3162 static struct rbd_client_id rbd_get_cid(struct rbd_device *rbd_dev)
3164 struct rbd_client_id cid;
3166 mutex_lock(&rbd_dev->watch_mutex);
3167 cid.gid = ceph_client_gid(rbd_dev->rbd_client->client);
3168 cid.handle = rbd_dev->watch_cookie;
3169 mutex_unlock(&rbd_dev->watch_mutex);
3174 * lock_rwsem must be held for write
3176 static void rbd_set_owner_cid(struct rbd_device *rbd_dev,
3177 const struct rbd_client_id *cid)
3179 dout("%s rbd_dev %p %llu-%llu -> %llu-%llu\n", __func__, rbd_dev,
3180 rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle,
3181 cid->gid, cid->handle);
3182 rbd_dev->owner_cid = *cid; /* struct */
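/*
 * The lock cookie written by format_lock_cookie() below is the watch
 * cookie prefixed by RBD_LOCK_COOKIE_PREFIX; assuming that prefix is
 * "auto", a cookie would look like "auto 94223081124096".
 */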
3185 static void format_lock_cookie(struct rbd_device *rbd_dev, char *buf)
3187 mutex_lock(&rbd_dev->watch_mutex);
3188 sprintf(buf, "%s %llu", RBD_LOCK_COOKIE_PREFIX, rbd_dev->watch_cookie);
3189 mutex_unlock(&rbd_dev->watch_mutex);
3193 * lock_rwsem must be held for write
3195 static int rbd_lock(struct rbd_device *rbd_dev)
3197 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3198 struct rbd_client_id cid = rbd_get_cid(rbd_dev);
3202 WARN_ON(__rbd_is_lock_owner(rbd_dev));
3204 format_lock_cookie(rbd_dev, cookie);
3205 ret = ceph_cls_lock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
3206 RBD_LOCK_NAME, CEPH_CLS_LOCK_EXCLUSIVE, cookie,
3207 RBD_LOCK_TAG, "", 0);
3208 if (ret)
3209 	return ret;
3211 rbd_dev->lock_state = RBD_LOCK_STATE_LOCKED;
3212 rbd_set_owner_cid(rbd_dev, &cid);
3213 queue_work(rbd_dev->task_wq, &rbd_dev->acquired_lock_work);
3214 return 0;
3218 * lock_rwsem must be held for write
3220 static int rbd_unlock(struct rbd_device *rbd_dev)
3222 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3226 WARN_ON(!__rbd_is_lock_owner(rbd_dev));
3228 rbd_dev->lock_state = RBD_LOCK_STATE_UNLOCKED;
3230 format_lock_cookie(rbd_dev, cookie);
3231 ret = ceph_cls_unlock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
3232 RBD_LOCK_NAME, cookie);
3233 if (ret && ret != -ENOENT) {
3234 rbd_warn(rbd_dev, "cls_unlock failed: %d", ret);
3235 return ret;
3236 }
3238 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
3239 queue_work(rbd_dev->task_wq, &rbd_dev->released_lock_work);
3240 return 0;
3243 static int __rbd_notify_op_lock(struct rbd_device *rbd_dev,
3244 enum rbd_notify_op notify_op,
3245 struct page ***preply_pages,
3246 size_t *preply_len)
3248 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3249 struct rbd_client_id cid = rbd_get_cid(rbd_dev);
3250 int buf_size = 4 + 8 + 8 + CEPH_ENCODING_START_BLK_LEN;
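/*
 * buf_size is the encoding header plus the NotifyMessage payload
 * encoded below: le32 notify_op + le64 gid + le64 handle (4 + 8 + 8).
 */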
3254 dout("%s rbd_dev %p notify_op %d\n", __func__, rbd_dev, notify_op);
3256 /* encode *LockPayload NotifyMessage (op + ClientId) */
3257 ceph_start_encoding(&p, 2, 1, buf_size - CEPH_ENCODING_START_BLK_LEN);
3258 ceph_encode_32(&p, notify_op);
3259 ceph_encode_64(&p, cid.gid);
3260 ceph_encode_64(&p, cid.handle);
3262 return ceph_osdc_notify(osdc, &rbd_dev->header_oid,
3263 &rbd_dev->header_oloc, buf, buf_size,
3264 RBD_NOTIFY_TIMEOUT, preply_pages, preply_len);
3267 static void rbd_notify_op_lock(struct rbd_device *rbd_dev,
3268 enum rbd_notify_op notify_op)
3270 struct page **reply_pages;
3273 __rbd_notify_op_lock(rbd_dev, notify_op, &reply_pages, &reply_len);
3274 ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len));
3277 static void rbd_notify_acquired_lock(struct work_struct *work)
3279 struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
3280 acquired_lock_work);
3282 rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_ACQUIRED_LOCK);
3285 static void rbd_notify_released_lock(struct work_struct *work)
3287 struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
3288 released_lock_work);
3290 rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_RELEASED_LOCK);
3293 static int rbd_request_lock(struct rbd_device *rbd_dev)
3295 struct page **reply_pages;
3297 bool lock_owner_responded = false;
3300 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3302 ret = __rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_REQUEST_LOCK,
3303 &reply_pages, &reply_len);
3304 if (ret && ret != -ETIMEDOUT) {
3305 rbd_warn(rbd_dev, "failed to request lock: %d", ret);
3309 if (reply_len > 0 && reply_len <= PAGE_SIZE) {
3310 void *p = page_address(reply_pages[0]);
3311 void *const end = p + reply_len;
3314 ceph_decode_32_safe(&p, end, n, e_inval); /* num_acks */
3319 ceph_decode_need(&p, end, 8 + 8, e_inval);
3320 p += 8 + 8; /* skip gid and cookie */
3322 ceph_decode_32_safe(&p, end, len, e_inval);
3326 if (lock_owner_responded) {
3327 rbd_warn(rbd_dev,
3328 	"duplicate lock owners detected");
3329 ret = -EIO;
3330 goto out;
3331 }
3333 lock_owner_responded = true;
3334 ret = ceph_start_decoding(&p, end, 1, "ResponseMessage",
3335 	&struct_v, &len);
3336 if (ret) {
3337 	rbd_warn(rbd_dev,
3338 		"failed to decode ResponseMessage: %d",
3339 		ret);
3340 	goto e_inval;
3341 }
3343 ret = ceph_decode_32(&p);
3347 if (!lock_owner_responded) {
3348 rbd_warn(rbd_dev, "no lock owners detected");
3349 ret = -ETIMEDOUT;
3350 }
3352 out:
3353 ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len));
3354 return ret;
3356 e_inval:
3357 ret = -EINVAL;
3358 goto out;
3361 static void wake_requests(struct rbd_device *rbd_dev, bool wake_all)
3363 dout("%s rbd_dev %p wake_all %d\n", __func__, rbd_dev, wake_all);
3365 cancel_delayed_work(&rbd_dev->lock_dwork);
3366 if (wake_all)
3367 	wake_up_all(&rbd_dev->lock_waitq);
3368 else
3369 	wake_up(&rbd_dev->lock_waitq);
3372 static int get_lock_owner_info(struct rbd_device *rbd_dev,
3373 struct ceph_locker **lockers, u32 *num_lockers)
3375 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3380 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3382 ret = ceph_cls_lock_info(osdc, &rbd_dev->header_oid,
3383 &rbd_dev->header_oloc, RBD_LOCK_NAME,
3384 &lock_type, &lock_tag, lockers, num_lockers);
3388 if (*num_lockers == 0) {
3389 dout("%s rbd_dev %p no lockers detected\n", __func__, rbd_dev);
3393 if (strcmp(lock_tag, RBD_LOCK_TAG)) {
3394 rbd_warn(rbd_dev, "locked by external mechanism, tag %s",
3395 	lock_tag);
3396 ret = -EBUSY;
3397 goto out;
3398 }
3400 if (lock_type == CEPH_CLS_LOCK_SHARED) {
3401 rbd_warn(rbd_dev, "shared lock type detected");
3406 if (strncmp((*lockers)[0].id.cookie, RBD_LOCK_COOKIE_PREFIX,
3407 strlen(RBD_LOCK_COOKIE_PREFIX))) {
3408 rbd_warn(rbd_dev, "locked by external mechanism, cookie %s",
3409 (*lockers)[0].id.cookie);
3419 static int find_watcher(struct rbd_device *rbd_dev,
3420 const struct ceph_locker *locker)
3422 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3423 struct ceph_watch_item *watchers;
3429 ret = ceph_osdc_list_watchers(osdc, &rbd_dev->header_oid,
3430 &rbd_dev->header_oloc, &watchers,
3431 	&num_watchers);
3432 if (ret)
3433 	return ret;
3435 sscanf(locker->id.cookie, RBD_LOCK_COOKIE_PREFIX " %llu", &cookie);
3436 for (i = 0; i < num_watchers; i++) {
3437 if (!memcmp(&watchers[i].addr, &locker->info.addr,
3438 sizeof(locker->info.addr)) &&
3439 watchers[i].cookie == cookie) {
3440 struct rbd_client_id cid = {
3441 .gid = le64_to_cpu(watchers[i].name.num),
3442 .handle = cookie,
3443 };
3445 dout("%s rbd_dev %p found cid %llu-%llu\n", __func__,
3446 rbd_dev, cid.gid, cid.handle);
3447 rbd_set_owner_cid(rbd_dev, &cid);
3453 dout("%s rbd_dev %p no watchers\n", __func__, rbd_dev);
3461 * lock_rwsem must be held for write
3463 static int rbd_try_lock(struct rbd_device *rbd_dev)
3465 struct ceph_client *client = rbd_dev->rbd_client->client;
3466 struct ceph_locker *lockers;
3471 ret = rbd_lock(rbd_dev);
3475 /* determine if the current lock holder is still alive */
3476 ret = get_lock_owner_info(rbd_dev, &lockers, &num_lockers);
3480 if (num_lockers == 0)
3481 	goto again;
3483 ret = find_watcher(rbd_dev, lockers);
3486 ret = 0; /* have to request lock */
3490 rbd_warn(rbd_dev, "%s%llu seems dead, breaking lock",
3491 ENTITY_NAME(lockers[0].id.name));
3493 ret = ceph_monc_blacklist_add(&client->monc,
3494 &lockers[0].info.addr);
3496 rbd_warn(rbd_dev, "blacklist of %s%llu failed: %d",
3497 ENTITY_NAME(lockers[0].id.name), ret);
3501 ret = ceph_cls_break_lock(&client->osdc, &rbd_dev->header_oid,
3502 &rbd_dev->header_oloc, RBD_LOCK_NAME,
3503 lockers[0].id.cookie,
3504 &lockers[0].id.name);
3505 if (ret && ret != -ENOENT)
3506 	goto out;
3508 again:
3509 ceph_free_lockers(lockers, num_lockers);
3512 out:
3513 ceph_free_lockers(lockers, num_lockers);
3514 return ret;
3518 * ret is set only if lock_state is RBD_LOCK_STATE_UNLOCKED
3520 static enum rbd_lock_state rbd_try_acquire_lock(struct rbd_device *rbd_dev,
3523 enum rbd_lock_state lock_state;
3525 down_read(&rbd_dev->lock_rwsem);
3526 dout("%s rbd_dev %p read lock_state %d\n", __func__, rbd_dev,
3527 rbd_dev->lock_state);
3528 if (__rbd_is_lock_owner(rbd_dev)) {
3529 lock_state = rbd_dev->lock_state;
3530 up_read(&rbd_dev->lock_rwsem);
3534 up_read(&rbd_dev->lock_rwsem);
3535 down_write(&rbd_dev->lock_rwsem);
3536 dout("%s rbd_dev %p write lock_state %d\n", __func__, rbd_dev,
3537 rbd_dev->lock_state);
3538 if (!__rbd_is_lock_owner(rbd_dev)) {
3539 *pret = rbd_try_lock(rbd_dev);
3540 if (*pret)
3541 	rbd_warn(rbd_dev, "failed to acquire lock: %d", *pret);
3544 lock_state = rbd_dev->lock_state;
3545 up_write(&rbd_dev->lock_rwsem);
3549 static void rbd_acquire_lock(struct work_struct *work)
3551 struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
3552 struct rbd_device, lock_dwork);
3553 enum rbd_lock_state lock_state;
3556 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3558 lock_state = rbd_try_acquire_lock(rbd_dev, &ret);
3559 if (lock_state != RBD_LOCK_STATE_UNLOCKED || ret == -EBLACKLISTED) {
3560 if (lock_state == RBD_LOCK_STATE_LOCKED)
3561 wake_requests(rbd_dev, true);
3562 dout("%s rbd_dev %p lock_state %d ret %d - done\n", __func__,
3563 rbd_dev, lock_state, ret);
3567 ret = rbd_request_lock(rbd_dev);
3568 if (ret == -ETIMEDOUT) {
3569 goto again; /* treat this as a dead client */
3570 } else if (ret < 0) {
3571 rbd_warn(rbd_dev, "error requesting lock: %d", ret);
3572 mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork,
3573 	RBD_RETRY_DELAY);
3574 } else {
3575 /*
3576 * lock owner acked, but resend if we don't see them
3577 * release the lock
3578 */
3579 dout("%s rbd_dev %p requeueing lock_dwork\n", __func__,
3580 	rbd_dev);
3581 mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork,
3582 msecs_to_jiffies(2 * RBD_NOTIFY_TIMEOUT * MSEC_PER_SEC));
3587 * lock_rwsem must be held for write
3589 static bool rbd_release_lock(struct rbd_device *rbd_dev)
3591 dout("%s rbd_dev %p read lock_state %d\n", __func__, rbd_dev,
3592 rbd_dev->lock_state);
3593 if (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED)
3596 rbd_dev->lock_state = RBD_LOCK_STATE_RELEASING;
3597 downgrade_write(&rbd_dev->lock_rwsem);
3599 * Ensure that all in-flight IO is flushed.
3601 * FIXME: ceph_osdc_sync() flushes the entire OSD client, which
3602 * may be shared with other devices.
3604 ceph_osdc_sync(&rbd_dev->rbd_client->client->osdc);
3605 up_read(&rbd_dev->lock_rwsem);
3607 down_write(&rbd_dev->lock_rwsem);
3608 dout("%s rbd_dev %p write lock_state %d\n", __func__, rbd_dev,
3609 rbd_dev->lock_state);
3610 if (rbd_dev->lock_state != RBD_LOCK_STATE_RELEASING)
3613 if (!rbd_unlock(rbd_dev))
3615 * Give others a chance to grab the lock - we would re-acquire
3616 * almost immediately if we got new IO during ceph_osdc_sync()
3617 * otherwise. We need to ack our own notifications, so this
3618 * lock_dwork will be requeued from rbd_wait_state_locked()
3619 * after wake_requests() in rbd_handle_released_lock().
3621 cancel_delayed_work(&rbd_dev->lock_dwork);
3626 static void rbd_release_lock_work(struct work_struct *work)
3628 struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
3631 down_write(&rbd_dev->lock_rwsem);
3632 rbd_release_lock(rbd_dev);
3633 up_write(&rbd_dev->lock_rwsem);
3636 static void rbd_handle_acquired_lock(struct rbd_device *rbd_dev, u8 struct_v,
3637 	void **p)
3639 struct rbd_client_id cid = { 0 };
3641 if (struct_v >= 2) {
3642 cid.gid = ceph_decode_64(p);
3643 cid.handle = ceph_decode_64(p);
3646 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
3647 	cid.handle);
3648 if (!rbd_cid_equal(&cid, &rbd_empty_cid)) {
3649 down_write(&rbd_dev->lock_rwsem);
3650 if (rbd_cid_equal(&cid, &rbd_dev->owner_cid)) {
3652 * we already know that the remote client is
3653 * the owner
3654 */
3655 up_write(&rbd_dev->lock_rwsem);
3659 rbd_set_owner_cid(rbd_dev, &cid);
3660 downgrade_write(&rbd_dev->lock_rwsem);
3662 down_read(&rbd_dev->lock_rwsem);
3665 if (!__rbd_is_lock_owner(rbd_dev))
3666 wake_requests(rbd_dev, false);
3667 up_read(&rbd_dev->lock_rwsem);
3670 static void rbd_handle_released_lock(struct rbd_device *rbd_dev, u8 struct_v,
3671 	void **p)
3673 struct rbd_client_id cid = { 0 };
3675 if (struct_v >= 2) {
3676 cid.gid = ceph_decode_64(p);
3677 cid.handle = ceph_decode_64(p);
3680 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
3681 	cid.handle);
3682 if (!rbd_cid_equal(&cid, &rbd_empty_cid)) {
3683 down_write(&rbd_dev->lock_rwsem);
3684 if (!rbd_cid_equal(&cid, &rbd_dev->owner_cid)) {
3685 dout("%s rbd_dev %p unexpected owner, cid %llu-%llu != owner_cid %llu-%llu\n",
3686 __func__, rbd_dev, cid.gid, cid.handle,
3687 rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle);
3688 up_write(&rbd_dev->lock_rwsem);
3692 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
3693 downgrade_write(&rbd_dev->lock_rwsem);
3695 down_read(&rbd_dev->lock_rwsem);
3698 if (!__rbd_is_lock_owner(rbd_dev))
3699 wake_requests(rbd_dev, false);
3700 up_read(&rbd_dev->lock_rwsem);
3703 static bool rbd_handle_request_lock(struct rbd_device *rbd_dev, u8 struct_v,
3704 	void **p)
3706 struct rbd_client_id my_cid = rbd_get_cid(rbd_dev);
3707 struct rbd_client_id cid = { 0 };
3710 if (struct_v >= 2) {
3711 cid.gid = ceph_decode_64(p);
3712 cid.handle = ceph_decode_64(p);
3715 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
3716 	cid.handle);
3717 if (rbd_cid_equal(&cid, &my_cid))
3718 	return false;
3720 down_read(&rbd_dev->lock_rwsem);
3721 need_to_send = __rbd_is_lock_owner(rbd_dev);
3722 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED) {
3723 if (!rbd_cid_equal(&rbd_dev->owner_cid, &rbd_empty_cid)) {
3724 dout("%s rbd_dev %p queueing unlock_work\n", __func__,
3725 	rbd_dev);
3726 queue_work(rbd_dev->task_wq, &rbd_dev->unlock_work);
3729 up_read(&rbd_dev->lock_rwsem);
3730 return need_to_send;
3733 static void __rbd_acknowledge_notify(struct rbd_device *rbd_dev,
3734 u64 notify_id, u64 cookie, s32 *result)
3736 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3737 int buf_size = 4 + CEPH_ENCODING_START_BLK_LEN;
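/*
 * buf_size is the encoding header plus the lone le32 result encoded
 * below; a NULL result means a bare ack with an empty payload.
 */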
3744 /* encode ResponseMessage */
3745 ceph_start_encoding(&p, 1, 1,
3746 buf_size - CEPH_ENCODING_START_BLK_LEN);
3747 ceph_encode_32(&p, *result);
3752 ret = ceph_osdc_notify_ack(osdc, &rbd_dev->header_oid,
3753 &rbd_dev->header_oloc, notify_id, cookie,
3754 	buf, buf_size);
3755 if (ret)
3756 	rbd_warn(rbd_dev, "acknowledge_notify failed: %d", ret);
3759 static void rbd_acknowledge_notify(struct rbd_device *rbd_dev, u64 notify_id,
3762 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3763 __rbd_acknowledge_notify(rbd_dev, notify_id, cookie, NULL);
3766 static void rbd_acknowledge_notify_result(struct rbd_device *rbd_dev,
3767 u64 notify_id, u64 cookie, s32 result)
3769 dout("%s rbd_dev %p result %d\n", __func__, rbd_dev, result);
3770 __rbd_acknowledge_notify(rbd_dev, notify_id, cookie, &result);
3773 static void rbd_watch_cb(void *arg, u64 notify_id, u64 cookie,
3774 u64 notifier_id, void *data, size_t data_len)
3776 struct rbd_device *rbd_dev = arg;
3778 void *const end = p + data_len;
3784 dout("%s rbd_dev %p cookie %llu notify_id %llu data_len %zu\n",
3785 __func__, rbd_dev, cookie, notify_id, data_len);
3787 ret = ceph_start_decoding(&p, end, 1, "NotifyMessage",
3788 	&struct_v, &len);
3789 if (ret) {
3790 	rbd_warn(rbd_dev, "failed to decode NotifyMessage: %d",
3791 		ret);
3792 	return;
3793 }
3795 notify_op = ceph_decode_32(&p);
3797 /* legacy notification for header updates */
3798 notify_op = RBD_NOTIFY_OP_HEADER_UPDATE;
3802 dout("%s rbd_dev %p notify_op %u\n", __func__, rbd_dev, notify_op);
3803 switch (notify_op) {
3804 case RBD_NOTIFY_OP_ACQUIRED_LOCK:
3805 rbd_handle_acquired_lock(rbd_dev, struct_v, &p);
3806 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
3807 break;
3808 case RBD_NOTIFY_OP_RELEASED_LOCK:
3809 rbd_handle_released_lock(rbd_dev, struct_v, &p);
3810 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
3811 break;
3812 case RBD_NOTIFY_OP_REQUEST_LOCK:
3813 if (rbd_handle_request_lock(rbd_dev, struct_v, &p))
3815 * send ResponseMessage(0) back so the client
3816 * can detect a missing owner
3818 rbd_acknowledge_notify_result(rbd_dev, notify_id,
3819 	cookie, 0);
3820 else
3821 	rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
3822 break;
3823 case RBD_NOTIFY_OP_HEADER_UPDATE:
3824 ret = rbd_dev_refresh(rbd_dev);
3826 rbd_warn(rbd_dev, "refresh failed: %d", ret);
3828 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
3829 break;
3830 default:
3831 if (rbd_is_lock_owner(rbd_dev))
3832 rbd_acknowledge_notify_result(rbd_dev, notify_id,
3833 cookie, -EOPNOTSUPP);
3834 else
3835 	rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
3836 break;
3840 static void __rbd_unregister_watch(struct rbd_device *rbd_dev);
3842 static void rbd_watch_errcb(void *arg, u64 cookie, int err)
3844 struct rbd_device *rbd_dev = arg;
3846 rbd_warn(rbd_dev, "encountered watch error: %d", err);
3848 down_write(&rbd_dev->lock_rwsem);
3849 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
3850 up_write(&rbd_dev->lock_rwsem);
3852 mutex_lock(&rbd_dev->watch_mutex);
3853 if (rbd_dev->watch_state == RBD_WATCH_STATE_REGISTERED) {
3854 __rbd_unregister_watch(rbd_dev);
3855 rbd_dev->watch_state = RBD_WATCH_STATE_ERROR;
3857 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->watch_dwork, 0);
3859 mutex_unlock(&rbd_dev->watch_mutex);
3863 * watch_mutex must be locked
3865 static int __rbd_register_watch(struct rbd_device *rbd_dev)
3867 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3868 struct ceph_osd_linger_request *handle;
3870 rbd_assert(!rbd_dev->watch_handle);
3871 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3873 handle = ceph_osdc_watch(osdc, &rbd_dev->header_oid,
3874 &rbd_dev->header_oloc, rbd_watch_cb,
3875 rbd_watch_errcb, rbd_dev);
3877 return PTR_ERR(handle);
3879 rbd_dev->watch_handle = handle;
3884 * watch_mutex must be locked
3886 static void __rbd_unregister_watch(struct rbd_device *rbd_dev)
3888 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3891 rbd_assert(rbd_dev->watch_handle);
3892 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3894 ret = ceph_osdc_unwatch(osdc, rbd_dev->watch_handle);
3896 rbd_warn(rbd_dev, "failed to unwatch: %d", ret);
3898 rbd_dev->watch_handle = NULL;
3901 static int rbd_register_watch(struct rbd_device *rbd_dev)
3905 mutex_lock(&rbd_dev->watch_mutex);
3906 rbd_assert(rbd_dev->watch_state == RBD_WATCH_STATE_UNREGISTERED);
3907 ret = __rbd_register_watch(rbd_dev);
3911 rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED;
3912 rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id;
3915 mutex_unlock(&rbd_dev->watch_mutex);
3919 static void cancel_tasks_sync(struct rbd_device *rbd_dev)
3921 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3923 cancel_delayed_work_sync(&rbd_dev->watch_dwork);
3924 cancel_work_sync(&rbd_dev->acquired_lock_work);
3925 cancel_work_sync(&rbd_dev->released_lock_work);
3926 cancel_delayed_work_sync(&rbd_dev->lock_dwork);
3927 cancel_work_sync(&rbd_dev->unlock_work);
3930 static void rbd_unregister_watch(struct rbd_device *rbd_dev)
3932 WARN_ON(waitqueue_active(&rbd_dev->lock_waitq));
3933 cancel_tasks_sync(rbd_dev);
3935 mutex_lock(&rbd_dev->watch_mutex);
3936 if (rbd_dev->watch_state == RBD_WATCH_STATE_REGISTERED)
3937 __rbd_unregister_watch(rbd_dev);
3938 rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED;
3939 mutex_unlock(&rbd_dev->watch_mutex);
3941 ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
3944 static void rbd_reregister_watch(struct work_struct *work)
3946 struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
3947 struct rbd_device, watch_dwork);
3948 bool was_lock_owner = false;
3951 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3953 down_write(&rbd_dev->lock_rwsem);
3954 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED)
3955 was_lock_owner = rbd_release_lock(rbd_dev);
3957 mutex_lock(&rbd_dev->watch_mutex);
3958 if (rbd_dev->watch_state != RBD_WATCH_STATE_ERROR)
3959 	goto fail_unlock;
3961 ret = __rbd_register_watch(rbd_dev);
3962 if (ret) {
3963 	rbd_warn(rbd_dev, "failed to reregister watch: %d", ret);
3964 if (ret != -EBLACKLISTED)
3965 queue_delayed_work(rbd_dev->task_wq,
3966 &rbd_dev->watch_dwork,
3967 	RBD_RETRY_DELAY);
3968 goto fail_unlock;
3969 }
3971 rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED;
3972 rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id;
3973 mutex_unlock(&rbd_dev->watch_mutex);
3975 ret = rbd_dev_refresh(rbd_dev);
3976 if (ret)
3977 	rbd_warn(rbd_dev, "reregistration refresh failed: %d", ret);
3979 if (was_lock_owner) {
3980 ret = rbd_try_lock(rbd_dev);
3981 if (ret)
3982 	rbd_warn(rbd_dev, "reregistration lock failed: %d",
3983 		ret);
3984 }
3986 up_write(&rbd_dev->lock_rwsem);
3987 wake_requests(rbd_dev, true);
3988 return;
3990 fail_unlock:
3991 mutex_unlock(&rbd_dev->watch_mutex);
3992 up_write(&rbd_dev->lock_rwsem);
3996 * Synchronous osd object method call. Returns the number of bytes
3997 * returned in the inbound buffer, or a negative error code.
3998 */
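/*
 * Illustrative call (the local variable names are hypothetical):
 * reading a v2 image's size via the "rbd" class would look
 * something like
 *
 *	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
 *				  "rbd", "get_size",
 *				  &snapid, sizeof (snapid),
 *				  &size_buf, sizeof (size_buf));
 */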
3999 static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
4000 const char *object_name,
4001 const char *class_name,
4002 const char *method_name,
4003 const void *outbound,
4004 size_t outbound_size,
4005 void *inbound,
4006 size_t inbound_size)
4008 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4009 struct rbd_obj_request *obj_request;
4010 struct page **pages;
4015 * Method calls are ultimately read operations. The result
4016 * should be placed into the inbound buffer provided. They
4017 * also supply outbound data--parameters for the object
4018 * method. Currently if this is present it will be a
4019 * snapshot id.
4020 */
4021 page_count = (u32)calc_pages_for(0, inbound_size);
4022 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
4023 if (IS_ERR(pages))
4024 	return PTR_ERR(pages);
4027 obj_request = rbd_obj_request_create(object_name, 0, inbound_size,
4028 	OBJ_REQUEST_PAGES);
4029 if (!obj_request)
4030 	goto out;
4032 obj_request->pages = pages;
4033 obj_request->page_count = page_count;
4035 obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
4036 	obj_request);
4037 if (!obj_request->osd_req)
4038 	goto out;
4040 osd_req_op_cls_init(obj_request->osd_req, 0, CEPH_OSD_OP_CALL,
4041 class_name, method_name);
4042 if (outbound_size) {
4043 struct ceph_pagelist *pagelist;
4045 pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
4046 if (!pagelist)
4047 	goto out;
4049 ceph_pagelist_init(pagelist);
4050 ceph_pagelist_append(pagelist, outbound, outbound_size);
4051 osd_req_op_cls_request_data_pagelist(obj_request->osd_req, 0,
4052 	pagelist);
4053 }
4054 osd_req_op_cls_response_data_pages(obj_request->osd_req, 0,
4055 obj_request->pages, inbound_size,
4056 	0, false, false);
4057 rbd_osd_req_format_read(obj_request);
4059 ret = rbd_obj_request_submit(osdc, obj_request);
4060 if (ret)
4061 	goto out;
4062 ret = rbd_obj_request_wait(obj_request);
4063 if (ret)
4064 	goto out;
4066 ret = obj_request->result;
4067 if (ret)
4068 	goto out;
4070 rbd_assert(obj_request->xferred < (u64)INT_MAX);
4071 ret = (int)obj_request->xferred;
4072 ceph_copy_from_page_vector(pages, inbound, 0, obj_request->xferred);
4073 out:
4074 if (obj_request)
4075 	rbd_obj_request_put(obj_request);
4076 else
4077 	ceph_release_page_vector(pages, page_count);
4079 return ret;
4083 * lock_rwsem must be held for read
4085 static void rbd_wait_state_locked(struct rbd_device *rbd_dev)
4091 * Note the use of mod_delayed_work() in rbd_acquire_lock()
4092 * and cancel_delayed_work() in wake_requests().
4094 dout("%s rbd_dev %p queueing lock_dwork\n", __func__, rbd_dev);
4095 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
4096 prepare_to_wait_exclusive(&rbd_dev->lock_waitq, &wait,
4097 TASK_UNINTERRUPTIBLE);
4098 up_read(&rbd_dev->lock_rwsem);
4099 schedule();
4100 down_read(&rbd_dev->lock_rwsem);
4101 } while (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED);
4102 finish_wait(&rbd_dev->lock_waitq, &wait);
4105 static void rbd_queue_workfn(struct work_struct *work)
4107 struct request *rq = blk_mq_rq_from_pdu(work);
4108 struct rbd_device *rbd_dev = rq->q->queuedata;
4109 struct rbd_img_request *img_request;
4110 struct ceph_snap_context *snapc = NULL;
4111 u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT;
4112 u64 length = blk_rq_bytes(rq);
4113 enum obj_operation_type op_type;
4115 bool must_be_locked;
4118 if (rq->cmd_type != REQ_TYPE_FS) {
4119 dout("%s: non-fs request type %d\n", __func__,
4120 (int) rq->cmd_type);
4125 if (req_op(rq) == REQ_OP_DISCARD)
4126 op_type = OBJ_OP_DISCARD;
4127 else if (req_op(rq) == REQ_OP_WRITE)
4128 op_type = OBJ_OP_WRITE;
4130 op_type = OBJ_OP_READ;
4132 /* Ignore/skip any zero-length requests */
4135 dout("%s: zero-length request\n", __func__);
4140 /* Only reads are allowed to a read-only device */
4142 if (op_type != OBJ_OP_READ) {
4143 if (rbd_dev->mapping.read_only) {
4147 rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP);
4151 * Quit early if the mapped snapshot no longer exists. It's
4152 * still possible the snapshot will have disappeared by the
4153 * time our request arrives at the osd, but there's no sense in
4154 * sending it if we already know.
4156 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
4157 dout("request for non-existent snapshot");
4158 rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
4163 if (offset && length > U64_MAX - offset + 1) {
4164 rbd_warn(rbd_dev, "bad request range (%llu~%llu)", offset,
4165 	length);
4166 result = -EINVAL;
4167 goto err_rq; /* Shouldn't happen */
4170 blk_mq_start_request(rq);
4172 down_read(&rbd_dev->header_rwsem);
4173 mapping_size = rbd_dev->mapping.size;
4174 if (op_type != OBJ_OP_READ) {
4175 snapc = rbd_dev->header.snapc;
4176 ceph_get_snap_context(snapc);
4177 must_be_locked = rbd_is_lock_supported(rbd_dev);
4179 must_be_locked = rbd_dev->opts->lock_on_read &&
4180 rbd_is_lock_supported(rbd_dev);
4182 up_read(&rbd_dev->header_rwsem);
4184 if (offset + length > mapping_size) {
4185 rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)", offset,
4186 length, mapping_size);
4191 if (must_be_locked) {
4192 down_read(&rbd_dev->lock_rwsem);
4193 if (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED)
4194 rbd_wait_state_locked(rbd_dev);
4197 img_request = rbd_img_request_create(rbd_dev, offset, length, op_type,
4198 	snapc);
4199 if (!img_request) {
4200 	result = -ENOMEM;
4201 	goto err_unlock;
4202 }
4203 img_request->rq = rq;
4204 snapc = NULL; /* img_request consumes a ref */
4206 if (op_type == OBJ_OP_DISCARD)
4207 result = rbd_img_request_fill(img_request, OBJ_REQUEST_NODATA,
4208 	NULL);
4209 else
4210 	result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
4211 		rq->bio);
4212 if (result)
4213 	goto err_img_request;
4215 result = rbd_img_request_submit(img_request);
4216 if (result)
4217 	goto err_img_request;
4219 if (must_be_locked)
4220 	up_read(&rbd_dev->lock_rwsem);
4221 return;
4223 err_img_request:
4224 rbd_img_request_put(img_request);
4225 err_unlock:
4226 if (must_be_locked)
4227 	up_read(&rbd_dev->lock_rwsem);
4228 err_rq:
4229 if (result)
4230 	rbd_warn(rbd_dev, "%s %llx at %llx result %d",
4231 obj_op_name(op_type), length, offset, result);
4232 ceph_put_snap_context(snapc);
4233 err:
4234 blk_mq_end_request(rq, result);
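/*
 * blk-mq may call ->queue_rq() from atomic context, while
 * rbd_queue_workfn() above can block (rwsems, waiting on the
 * exclusive lock), so each request is bounced to the rbd workqueue.
 */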
4237 static int rbd_queue_rq(struct blk_mq_hw_ctx *hctx,
4238 const struct blk_mq_queue_data *bd)
4240 struct request *rq = bd->rq;
4241 struct work_struct *work = blk_mq_rq_to_pdu(rq);
4243 queue_work(rbd_wq, work);
4244 return BLK_MQ_RQ_QUEUE_OK;
4247 static void rbd_free_disk(struct rbd_device *rbd_dev)
4249 struct gendisk *disk = rbd_dev->disk;
4254 rbd_dev->disk = NULL;
4255 if (disk->flags & GENHD_FL_UP) {
4258 blk_cleanup_queue(disk->queue);
4259 blk_mq_free_tag_set(&rbd_dev->tag_set);
4264 static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
4265 const char *object_name,
4266 u64 offset, u64 length, void *buf)
4269 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4270 struct rbd_obj_request *obj_request;
4271 struct page **pages = NULL;
4276 page_count = (u32) calc_pages_for(offset, length);
4277 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
4278 if (IS_ERR(pages))
4279 	return PTR_ERR(pages);
4282 obj_request = rbd_obj_request_create(object_name, offset, length,
4283 	OBJ_REQUEST_PAGES);
4284 if (!obj_request)
4285 	goto out;
4287 obj_request->pages = pages;
4288 obj_request->page_count = page_count;
4290 obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
4291 	obj_request);
4292 if (!obj_request->osd_req)
4293 	goto out;
4295 osd_req_op_extent_init(obj_request->osd_req, 0, CEPH_OSD_OP_READ,
4296 offset, length, 0, 0);
4297 osd_req_op_extent_osd_data_pages(obj_request->osd_req, 0,
4298 	obj_request->pages,
4299 obj_request->length,
4300 obj_request->offset & ~PAGE_MASK,
4301 	false, false);
4302 rbd_osd_req_format_read(obj_request);
4304 ret = rbd_obj_request_submit(osdc, obj_request);
4305 if (ret)
4306 	goto out;
4307 ret = rbd_obj_request_wait(obj_request);
4308 if (ret)
4309 	goto out;
4311 ret = obj_request->result;
4312 if (ret)
4313 	goto out;
4315 rbd_assert(obj_request->xferred <= (u64) SIZE_MAX);
4316 size = (size_t) obj_request->xferred;
4317 ceph_copy_from_page_vector(pages, buf, 0, size);
4318 rbd_assert(size <= (size_t)INT_MAX);
4319 ret = (int)size;
4320 out:
4321 if (obj_request)
4322 	rbd_obj_request_put(obj_request);
4323 else
4324 	ceph_release_page_vector(pages, page_count);
4326 return ret;
4330 * Read the complete header for the given rbd device. On successful
4331 * return, the rbd_dev->header field will contain up-to-date
4332 * information about the image.
4334 static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
4336 struct rbd_image_header_ondisk *ondisk = NULL;
4343 * The complete header will include an array of its 64-bit
4344 * snapshot ids, followed by the names of those snapshots as
4345 * a contiguous block of NUL-terminated strings. Note that
4346 * the number of snapshots could change by the time we read
4347 * it in, in which case we re-read it.
4354 size = sizeof (*ondisk);
4355 size += snap_count * sizeof (struct rbd_image_snap_ondisk);
4357 ondisk = kmalloc(size, GFP_KERNEL);
4361 ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_oid.name,
4362 	0, size, ondisk);
4363 if (ret < 0)
4364 	goto out;
4365 if ((size_t)ret < size) {
4366 ret = -ENXIO;
4367 rbd_warn(rbd_dev, "short header read (want %zd got %d)",
4368 	size, ret);
4369 goto out;
4370 }
4371 if (!rbd_dev_ondisk_valid(ondisk)) {
4373 rbd_warn(rbd_dev, "invalid header");
4377 names_size = le64_to_cpu(ondisk->snap_names_len);
4378 want_count = snap_count;
4379 snap_count = le32_to_cpu(ondisk->snap_count);
4380 } while (snap_count != want_count);
4382 ret = rbd_header_from_disk(rbd_dev, ondisk);
4383 out:
4384 kfree(ondisk);
4386 return ret;
4390 * Clear the rbd device's EXISTS flag if the snapshot it's mapped to
4391 * has disappeared from the (just updated) snapshot context.
4393 static void rbd_exists_validate(struct rbd_device *rbd_dev)
4397 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags))
4400 snap_id = rbd_dev->spec->snap_id;
4401 if (snap_id == CEPH_NOSNAP)
4404 if (rbd_dev_snap_index(rbd_dev, snap_id) == BAD_SNAP_INDEX)
4405 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
4408 static void rbd_dev_update_size(struct rbd_device *rbd_dev)
4413 * If EXISTS is not set, rbd_dev->disk may be NULL, so don't
4414 * try to update its size. If REMOVING is set, updating size
4415 * is just useless work since the device can't be opened.
4417 if (test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags) &&
4418 !test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags)) {
4419 size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
4420 dout("setting size to %llu sectors", (unsigned long long)size);
4421 set_capacity(rbd_dev->disk, size);
4422 revalidate_disk(rbd_dev->disk);
4426 static int rbd_dev_refresh(struct rbd_device *rbd_dev)
4431 down_write(&rbd_dev->header_rwsem);
4432 mapping_size = rbd_dev->mapping.size;
4434 ret = rbd_dev_header_info(rbd_dev);
4439 * If there is a parent, see if it has disappeared due to the
4440 * mapped image getting flattened.
4442 if (rbd_dev->parent) {
4443 ret = rbd_dev_v2_parent_info(rbd_dev);
4448 if (rbd_dev->spec->snap_id == CEPH_NOSNAP) {
4449 rbd_dev->mapping.size = rbd_dev->header.image_size;
4451 /* validate mapped snapshot's EXISTS flag */
4452 rbd_exists_validate(rbd_dev);
4456 up_write(&rbd_dev->header_rwsem);
4457 if (!ret && mapping_size != rbd_dev->mapping.size)
4458 rbd_dev_update_size(rbd_dev);
4463 static int rbd_init_request(void *data, struct request *rq,
4464 unsigned int hctx_idx, unsigned int request_idx,
4465 unsigned int numa_node)
4467 struct work_struct *work = blk_mq_rq_to_pdu(rq);
4469 INIT_WORK(work, rbd_queue_workfn);
4473 static struct blk_mq_ops rbd_mq_ops = {
4474 .queue_rq = rbd_queue_rq,
4475 .map_queue = blk_mq_map_queue,
4476 .init_request = rbd_init_request,
4479 static int rbd_init_disk(struct rbd_device *rbd_dev)
4481 struct gendisk *disk;
4482 struct request_queue *q;
4486 /* create gendisk info */
4487 disk = alloc_disk(single_major ?
4488 (1 << RBD_SINGLE_MAJOR_PART_SHIFT) :
4489 RBD_MINORS_PER_MAJOR);
4493 snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
4494 	rbd_dev->dev_id);
4495 disk->major = rbd_dev->major;
4496 disk->first_minor = rbd_dev->minor;
4498 disk->flags |= GENHD_FL_EXT_DEVT;
4499 disk->fops = &rbd_bd_ops;
4500 disk->private_data = rbd_dev;
4502 memset(&rbd_dev->tag_set, 0, sizeof(rbd_dev->tag_set));
4503 rbd_dev->tag_set.ops = &rbd_mq_ops;
4504 rbd_dev->tag_set.queue_depth = rbd_dev->opts->queue_depth;
4505 rbd_dev->tag_set.numa_node = NUMA_NO_NODE;
4506 rbd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
4507 rbd_dev->tag_set.nr_hw_queues = 1;
4508 rbd_dev->tag_set.cmd_size = sizeof(struct work_struct);
4510 err = blk_mq_alloc_tag_set(&rbd_dev->tag_set);
4514 q = blk_mq_init_queue(&rbd_dev->tag_set);
4520 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
4521 /* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */
4523 /* set io sizes to object size */
4524 segment_size = rbd_obj_bytes(&rbd_dev->header);
4525 blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
4526 q->limits.max_sectors = queue_max_hw_sectors(q);
4527 blk_queue_max_segments(q, segment_size / SECTOR_SIZE);
4528 blk_queue_max_segment_size(q, segment_size);
4529 blk_queue_io_min(q, segment_size);
4530 blk_queue_io_opt(q, segment_size);
4532 /* enable the discard support */
4533 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
4534 q->limits.discard_granularity = segment_size;
4535 q->limits.discard_alignment = segment_size;
4536 blk_queue_max_discard_sectors(q, segment_size / SECTOR_SIZE);
4537 q->limits.discard_zeroes_data = 1;
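/*
 * Discard limits above are expressed in object-size units, so
 * whole-object discards can be turned into DELETEs and smaller
 * ranges into TRUNCATE/ZERO ops; see rbd_img_obj_request_fill().
 */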
4539 if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
4540 q->backing_dev_info.capabilities |= BDI_CAP_STABLE_WRITES;
4544 q->queuedata = rbd_dev;
4546 rbd_dev->disk = disk;
4548 return 0;
4549 out_tag_set:
4550 blk_mq_free_tag_set(&rbd_dev->tag_set);
4551 out_disk:
4552 put_disk(disk);
4553 return err;
4560 static struct rbd_device *dev_to_rbd_dev(struct device *dev)
4562 return container_of(dev, struct rbd_device, dev);
4565 static ssize_t rbd_size_show(struct device *dev,
4566 struct device_attribute *attr, char *buf)
4568 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4570 return sprintf(buf, "%llu\n",
4571 (unsigned long long)rbd_dev->mapping.size);
4575 * Note this shows the features for whatever's mapped, which is not
4576 * necessarily the base image.
4578 static ssize_t rbd_features_show(struct device *dev,
4579 struct device_attribute *attr, char *buf)
4581 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4583 return sprintf(buf, "0x%016llx\n",
4584 (unsigned long long)rbd_dev->mapping.features);
4587 static ssize_t rbd_major_show(struct device *dev,
4588 struct device_attribute *attr, char *buf)
4590 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4593 return sprintf(buf, "%d\n", rbd_dev->major);
4595 return sprintf(buf, "(none)\n");
4598 static ssize_t rbd_minor_show(struct device *dev,
4599 struct device_attribute *attr, char *buf)
4601 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4603 return sprintf(buf, "%d\n", rbd_dev->minor);
4606 static ssize_t rbd_client_addr_show(struct device *dev,
4607 struct device_attribute *attr, char *buf)
4609 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4610 struct ceph_entity_addr *client_addr =
4611 ceph_client_addr(rbd_dev->rbd_client->client);
4613 return sprintf(buf, "%pISpc/%u\n", &client_addr->in_addr,
4614 le32_to_cpu(client_addr->nonce));
4617 static ssize_t rbd_client_id_show(struct device *dev,
4618 struct device_attribute *attr, char *buf)
4620 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4622 return sprintf(buf, "client%lld\n",
4623 ceph_client_gid(rbd_dev->rbd_client->client));
4626 static ssize_t rbd_cluster_fsid_show(struct device *dev,
4627 struct device_attribute *attr, char *buf)
4629 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4631 return sprintf(buf, "%pU\n", &rbd_dev->rbd_client->client->fsid);
4634 static ssize_t rbd_config_info_show(struct device *dev,
4635 struct device_attribute *attr, char *buf)
4637 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4639 return sprintf(buf, "%s\n", rbd_dev->config_info);
4642 static ssize_t rbd_pool_show(struct device *dev,
4643 struct device_attribute *attr, char *buf)
4645 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4647 return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
4650 static ssize_t rbd_pool_id_show(struct device *dev,
4651 struct device_attribute *attr, char *buf)
4653 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4655 return sprintf(buf, "%llu\n",
4656 (unsigned long long) rbd_dev->spec->pool_id);
4659 static ssize_t rbd_name_show(struct device *dev,
4660 struct device_attribute *attr, char *buf)
4662 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4664 if (rbd_dev->spec->image_name)
4665 return sprintf(buf, "%s\n", rbd_dev->spec->image_name);
4667 return sprintf(buf, "(unknown)\n");
4670 static ssize_t rbd_image_id_show(struct device *dev,
4671 struct device_attribute *attr, char *buf)
4673 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4675 return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
4679 * Shows the name of the currently-mapped snapshot (or
4680 * RBD_SNAP_HEAD_NAME for the base image).
4682 static ssize_t rbd_snap_show(struct device *dev,
4683 struct device_attribute *attr,
4686 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4688 return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
4691 static ssize_t rbd_snap_id_show(struct device *dev,
4692 struct device_attribute *attr, char *buf)
4694 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4696 return sprintf(buf, "%llu\n", rbd_dev->spec->snap_id);
4700 * For a v2 image, shows the chain of parent images, separated by empty
4701  * lines.  For v1 images or if there is no parent, shows "(no parent image)".
4704 static ssize_t rbd_parent_show(struct device *dev,
4705 struct device_attribute *attr,
4708 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4711 if (!rbd_dev->parent)
4712 return sprintf(buf, "(no parent image)\n");
4714 for ( ; rbd_dev->parent; rbd_dev = rbd_dev->parent) {
4715 struct rbd_spec *spec = rbd_dev->parent_spec;
4717 count += sprintf(&buf[count], "%s"
4718 "pool_id %llu\npool_name %s\n"
4719 "image_id %s\nimage_name %s\n"
4720 "snap_id %llu\nsnap_name %s\n"
4722 !count ? "" : "\n", /* first? */
4723 spec->pool_id, spec->pool_name,
4724 spec->image_id, spec->image_name ?: "(unknown)",
4725 spec->snap_id, spec->snap_name,
4726 rbd_dev->parent_overlap);
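/*
 * Illustrative output (added, hypothetical values) for the "parent"
 * attribute built above, for a clone of rbd/base@snap1:
 *
 *   pool_id 2
 *   pool_name rbd
 *   image_id 1028b4567890
 *   image_name base
 *   snap_id 4
 *   snap_name snap1
 *   overlap 10737418240
 */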
4732 static ssize_t rbd_image_refresh(struct device *dev,
4733 struct device_attribute *attr,
4737 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4740 ret = rbd_dev_refresh(rbd_dev);
4747 static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
4748 static DEVICE_ATTR(features, S_IRUGO, rbd_features_show, NULL);
4749 static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
4750 static DEVICE_ATTR(minor, S_IRUGO, rbd_minor_show, NULL);
4751 static DEVICE_ATTR(client_addr, S_IRUGO, rbd_client_addr_show, NULL);
4752 static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
4753 static DEVICE_ATTR(cluster_fsid, S_IRUGO, rbd_cluster_fsid_show, NULL);
4754 static DEVICE_ATTR(config_info, S_IRUSR, rbd_config_info_show, NULL);
4755 static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
4756 static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL);
4757 static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
4758 static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL);
4759 static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
4760 static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
4761 static DEVICE_ATTR(snap_id, S_IRUGO, rbd_snap_id_show, NULL);
4762 static DEVICE_ATTR(parent, S_IRUGO, rbd_parent_show, NULL);
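/*
 * Usage sketch (added): the attributes above appear under
 * /sys/bus/rbd/devices/<id>/ as described in
 * Documentation/ABI/testing/sysfs-bus-rbd, e.g. (values illustrative):
 *
 *   $ cat /sys/bus/rbd/devices/0/size
 *   10737418240
 *   $ cat /sys/bus/rbd/devices/0/pool
 *   rbd
 */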
4764 static struct attribute *rbd_attrs[] = {
4765 &dev_attr_size.attr,
4766 &dev_attr_features.attr,
4767 &dev_attr_major.attr,
4768 &dev_attr_minor.attr,
4769 &dev_attr_client_addr.attr,
4770 &dev_attr_client_id.attr,
4771 &dev_attr_cluster_fsid.attr,
4772 &dev_attr_config_info.attr,
4773 &dev_attr_pool.attr,
4774 &dev_attr_pool_id.attr,
4775 &dev_attr_name.attr,
4776 &dev_attr_image_id.attr,
4777 &dev_attr_current_snap.attr,
4778 &dev_attr_snap_id.attr,
4779 &dev_attr_parent.attr,
4780 &dev_attr_refresh.attr,
4784 static struct attribute_group rbd_attr_group = {
4788 static const struct attribute_group *rbd_attr_groups[] = {
4793 static void rbd_dev_release(struct device *dev);
4795 static struct device_type rbd_device_type = {
4797 .groups = rbd_attr_groups,
4798 .release = rbd_dev_release,
4801 static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
4803 kref_get(&spec->kref);
4808 static void rbd_spec_free(struct kref *kref);
4809 static void rbd_spec_put(struct rbd_spec *spec)
4812 kref_put(&spec->kref, rbd_spec_free);
4815 static struct rbd_spec *rbd_spec_alloc(void)
4817 struct rbd_spec *spec;
4819 spec = kzalloc(sizeof (*spec), GFP_KERNEL);
4823 spec->pool_id = CEPH_NOPOOL;
4824 spec->snap_id = CEPH_NOSNAP;
4825 kref_init(&spec->kref);
4830 static void rbd_spec_free(struct kref *kref)
4832 struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);
4834 kfree(spec->pool_name);
4835 kfree(spec->image_id);
4836 kfree(spec->image_name);
4837 kfree(spec->snap_name);
4841 static void rbd_dev_free(struct rbd_device *rbd_dev)
4843 WARN_ON(rbd_dev->watch_state != RBD_WATCH_STATE_UNREGISTERED);
4844 WARN_ON(rbd_dev->lock_state != RBD_LOCK_STATE_UNLOCKED);
4846 ceph_oid_destroy(&rbd_dev->header_oid);
4847 ceph_oloc_destroy(&rbd_dev->header_oloc);
4848 kfree(rbd_dev->config_info);
4850 rbd_put_client(rbd_dev->rbd_client);
4851 rbd_spec_put(rbd_dev->spec);
4852 kfree(rbd_dev->opts);
4856 static void rbd_dev_release(struct device *dev)
4858 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4859 bool need_put = !!rbd_dev->opts;
4862 destroy_workqueue(rbd_dev->task_wq);
4863 ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
4866 rbd_dev_free(rbd_dev);
4869  * This is racy, but way better than putting the module put outside of
4870 * the release callback. The race window is pretty small, so
4871 * doing something similar to dm (dm-builtin.c) is overkill.
4874 module_put(THIS_MODULE);
4877 static struct rbd_device *__rbd_dev_create(struct rbd_client *rbdc,
4878 struct rbd_spec *spec)
4880 struct rbd_device *rbd_dev;
4882 rbd_dev = kzalloc(sizeof(*rbd_dev), GFP_KERNEL);
4886 spin_lock_init(&rbd_dev->lock);
4887 INIT_LIST_HEAD(&rbd_dev->node);
4888 init_rwsem(&rbd_dev->header_rwsem);
4890 ceph_oid_init(&rbd_dev->header_oid);
4891 ceph_oloc_init(&rbd_dev->header_oloc);
4893 mutex_init(&rbd_dev->watch_mutex);
4894 rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED;
4895 INIT_DELAYED_WORK(&rbd_dev->watch_dwork, rbd_reregister_watch);
4897 init_rwsem(&rbd_dev->lock_rwsem);
4898 rbd_dev->lock_state = RBD_LOCK_STATE_UNLOCKED;
4899 INIT_WORK(&rbd_dev->acquired_lock_work, rbd_notify_acquired_lock);
4900 INIT_WORK(&rbd_dev->released_lock_work, rbd_notify_released_lock);
4901 INIT_DELAYED_WORK(&rbd_dev->lock_dwork, rbd_acquire_lock);
4902 INIT_WORK(&rbd_dev->unlock_work, rbd_release_lock_work);
4903 init_waitqueue_head(&rbd_dev->lock_waitq);
4905 rbd_dev->dev.bus = &rbd_bus_type;
4906 rbd_dev->dev.type = &rbd_device_type;
4907 rbd_dev->dev.parent = &rbd_root_dev;
4908 device_initialize(&rbd_dev->dev);
4910 rbd_dev->rbd_client = rbdc;
4911 rbd_dev->spec = spec;
4913 rbd_dev->layout.stripe_unit = 1 << RBD_MAX_OBJ_ORDER;
4914 rbd_dev->layout.stripe_count = 1;
4915 rbd_dev->layout.object_size = 1 << RBD_MAX_OBJ_ORDER;
4916 rbd_dev->layout.pool_id = spec->pool_id;
4917 RCU_INIT_POINTER(rbd_dev->layout.pool_ns, NULL);
4923  * Create an rbd_dev representing a mapping (as opposed to a parent image).
4925 static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
4926 struct rbd_spec *spec,
4927 struct rbd_options *opts)
4929 struct rbd_device *rbd_dev;
4931 rbd_dev = __rbd_dev_create(rbdc, spec);
4935 rbd_dev->opts = opts;
4937 /* get an id and fill in device name */
4938 rbd_dev->dev_id = ida_simple_get(&rbd_dev_id_ida, 0,
4939 minor_to_rbd_dev_id(1 << MINORBITS),
4941 if (rbd_dev->dev_id < 0)
4944 sprintf(rbd_dev->name, RBD_DRV_NAME "%d", rbd_dev->dev_id);
4945 rbd_dev->task_wq = alloc_ordered_workqueue("%s-tasks", WQ_MEM_RECLAIM,
4947 if (!rbd_dev->task_wq)
4950 /* we have a ref from do_rbd_add() */
4951 __module_get(THIS_MODULE);
4953 dout("%s rbd_dev %p dev_id %d\n", __func__, rbd_dev, rbd_dev->dev_id);
4957 ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
4959 rbd_dev_free(rbd_dev);
4963 static void rbd_dev_destroy(struct rbd_device *rbd_dev)
4966 put_device(&rbd_dev->dev);
4970 * Get the size and object order for an image snapshot, or if
4971  * snap_id is CEPH_NOSNAP, gets this information for the base image.
4974 static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
4975 u8 *order, u64 *snap_size)
4977 __le64 snapid = cpu_to_le64(snap_id);
4982 } __attribute__ ((packed)) size_buf = { 0 };
4984 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
4986 &snapid, sizeof (snapid),
4987 &size_buf, sizeof (size_buf));
4988 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4991 if (ret < sizeof (size_buf))
4995 *order = size_buf.order;
4996 dout(" order %u", (unsigned int)*order);
4998 *snap_size = le64_to_cpu(size_buf.size);
5000 dout(" snap_id 0x%016llx snap_size = %llu\n",
5001 (unsigned long long)snap_id,
5002 (unsigned long long)*snap_size);
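/*
 * Added note: the "get_size" class method replies with the packed
 * structure decoded above, i.e. on the wire:
 *
 *   u8    order;    object size is (1 << order) bytes
 *   le64  size;     image or snapshot size in bytes
 */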
5007 static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
5009 return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
5010 &rbd_dev->header.obj_order,
5011 &rbd_dev->header.image_size);
5014 static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
5020 reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL);
5024 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
5025 "rbd", "get_object_prefix", NULL, 0,
5026 reply_buf, RBD_OBJ_PREFIX_LEN_MAX);
5027 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5032 rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
5033 p + ret, NULL, GFP_NOIO);
5036 if (IS_ERR(rbd_dev->header.object_prefix)) {
5037 ret = PTR_ERR(rbd_dev->header.object_prefix);
5038 rbd_dev->header.object_prefix = NULL;
5040 dout(" object_prefix = %s\n", rbd_dev->header.object_prefix);
5048 static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
5051 __le64 snapid = cpu_to_le64(snap_id);
5055 } __attribute__ ((packed)) features_buf = { 0 };
5059 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
5060 "rbd", "get_features",
5061 &snapid, sizeof (snapid),
5062 &features_buf, sizeof (features_buf));
5063 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5066 if (ret < sizeof (features_buf))
5069 unsup = le64_to_cpu(features_buf.incompat) & ~RBD_FEATURES_SUPPORTED;
5071 rbd_warn(rbd_dev, "image uses unsupported features: 0x%llx",
5076 *snap_features = le64_to_cpu(features_buf.features);
5078 dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
5079 (unsigned long long)snap_id,
5080 (unsigned long long)*snap_features,
5081 (unsigned long long)le64_to_cpu(features_buf.incompat));
5086 static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
5088 return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
5089 &rbd_dev->header.features);
5092 static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
5094 struct rbd_spec *parent_spec;
5096 void *reply_buf = NULL;
5106 parent_spec = rbd_spec_alloc();
5110 size = sizeof (__le64) + /* pool_id */
5111 sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX + /* image_id */
5112 sizeof (__le64) + /* snap_id */
5113 sizeof (__le64); /* overlap */
5114 reply_buf = kmalloc(size, GFP_KERNEL);
5120 snapid = cpu_to_le64(rbd_dev->spec->snap_id);
5121 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
5122 "rbd", "get_parent",
5123 &snapid, sizeof (snapid),
5125 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5130 end = reply_buf + ret;
5132 ceph_decode_64_safe(&p, end, pool_id, out_err);
5133 if (pool_id == CEPH_NOPOOL) {
5135 * Either the parent never existed, or we have
5136 * record of it but the image got flattened so it no
5137 * longer has a parent. When the parent of a
5138 * layered image disappears we immediately set the
5139 * overlap to 0. The effect of this is that all new
5140  * requests will be treated as if the image had no parent.
5143 if (rbd_dev->parent_overlap) {
5144 rbd_dev->parent_overlap = 0;
5145 rbd_dev_parent_put(rbd_dev);
5146 pr_info("%s: clone image has been flattened\n",
5147 rbd_dev->disk->disk_name);
5150 goto out; /* No parent? No problem. */
5153 /* The ceph file layout needs to fit pool id in 32 bits */
5156 if (pool_id > (u64)U32_MAX) {
5157 rbd_warn(NULL, "parent pool id too large (%llu > %u)",
5158 (unsigned long long)pool_id, U32_MAX);
5162 image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
5163 if (IS_ERR(image_id)) {
5164 ret = PTR_ERR(image_id);
5167 ceph_decode_64_safe(&p, end, snap_id, out_err);
5168 ceph_decode_64_safe(&p, end, overlap, out_err);
5171  * The parent won't change (except when the clone is
5172  * flattened, which was handled above).  So we only need to
5173  * record the parent spec if we have not already done so.
5175 if (!rbd_dev->parent_spec) {
5176 parent_spec->pool_id = pool_id;
5177 parent_spec->image_id = image_id;
5178 parent_spec->snap_id = snap_id;
5179 rbd_dev->parent_spec = parent_spec;
5180 parent_spec = NULL; /* rbd_dev now owns this */
5186 * We always update the parent overlap. If it's zero we issue
5187 * a warning, as we will proceed as if there was no parent.
5191 /* refresh, careful to warn just once */
5192 if (rbd_dev->parent_overlap)
5194 "clone now standalone (overlap became 0)");
5197 rbd_warn(rbd_dev, "clone is standalone (overlap 0)");
5200 rbd_dev->parent_overlap = overlap;
5206 rbd_spec_put(parent_spec);
5211 static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
5215 __le64 stripe_count;
5216 } __attribute__ ((packed)) striping_info_buf = { 0 };
5217 size_t size = sizeof (striping_info_buf);
5224 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
5225 "rbd", "get_stripe_unit_count", NULL, 0,
5226 (char *)&striping_info_buf, size);
5227 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5234 * We don't actually support the "fancy striping" feature
5235 * (STRIPINGV2) yet, but if the striping sizes are the
5236 * defaults the behavior is the same as before. So find
5237 * out, and only fail if the image has non-default values.
5240 obj_size = (u64)1 << rbd_dev->header.obj_order;
5241 p = &striping_info_buf;
5242 stripe_unit = ceph_decode_64(&p);
5243 if (stripe_unit != obj_size) {
5244 rbd_warn(rbd_dev, "unsupported stripe unit "
5245 "(got %llu want %llu)",
5246 stripe_unit, obj_size);
5249 stripe_count = ceph_decode_64(&p);
5250 if (stripe_count != 1) {
5251 rbd_warn(rbd_dev, "unsupported stripe count "
5252 "(got %llu want 1)", stripe_count);
5255 rbd_dev->header.stripe_unit = stripe_unit;
5256 rbd_dev->header.stripe_count = stripe_count;
5261 static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
5263 size_t image_id_size;
5268 void *reply_buf = NULL;
5270 char *image_name = NULL;
5273 rbd_assert(!rbd_dev->spec->image_name);
5275 len = strlen(rbd_dev->spec->image_id);
5276 image_id_size = sizeof (__le32) + len;
5277 image_id = kmalloc(image_id_size, GFP_KERNEL);
5282 end = image_id + image_id_size;
5283 ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);
5285 size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
5286 reply_buf = kmalloc(size, GFP_KERNEL);
5290 ret = rbd_obj_method_sync(rbd_dev, RBD_DIRECTORY,
5291 "rbd", "dir_get_name",
5292 image_id, image_id_size,
5297 end = reply_buf + ret;
5299 image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
5300 if (IS_ERR(image_name))
5303 dout("%s: name is %s len is %zd\n", __func__, image_name, len);
5311 static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
5313 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
5314 const char *snap_name;
5317 /* Skip over names until we find the one we are looking for */
5319 snap_name = rbd_dev->header.snap_names;
5320 while (which < snapc->num_snaps) {
5321 if (!strcmp(name, snap_name))
5322 return snapc->snaps[which];
5323 snap_name += strlen(snap_name) + 1;
5329 static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
5331 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
5336 for (which = 0; !found && which < snapc->num_snaps; which++) {
5337 const char *snap_name;
5339 snap_id = snapc->snaps[which];
5340 snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
5341 if (IS_ERR(snap_name)) {
5342 /* ignore no-longer existing snapshots */
5343 if (PTR_ERR(snap_name) == -ENOENT)
5348 found = !strcmp(name, snap_name);
5351 return found ? snap_id : CEPH_NOSNAP;
5355 * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if
5356 * no snapshot by that name is found, or if an error occurs.
5358 static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
5360 if (rbd_dev->image_format == 1)
5361 return rbd_v1_snap_id_by_name(rbd_dev, name);
5363 return rbd_v2_snap_id_by_name(rbd_dev, name);
5367 * An image being mapped will have everything but the snap id.
5369 static int rbd_spec_fill_snap_id(struct rbd_device *rbd_dev)
5371 struct rbd_spec *spec = rbd_dev->spec;
5373 rbd_assert(spec->pool_id != CEPH_NOPOOL && spec->pool_name);
5374 rbd_assert(spec->image_id && spec->image_name);
5375 rbd_assert(spec->snap_name);
5377 if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
5380 snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
5381 if (snap_id == CEPH_NOSNAP)
5384 spec->snap_id = snap_id;
5386 spec->snap_id = CEPH_NOSNAP;
5393 * A parent image will have all ids but none of the names.
5395 * All names in an rbd spec are dynamically allocated. It's OK if we
5396 * can't figure out the name for an image id.
5398 static int rbd_spec_fill_names(struct rbd_device *rbd_dev)
5400 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
5401 struct rbd_spec *spec = rbd_dev->spec;
5402 const char *pool_name;
5403 const char *image_name;
5404 const char *snap_name;
5407 rbd_assert(spec->pool_id != CEPH_NOPOOL);
5408 rbd_assert(spec->image_id);
5409 rbd_assert(spec->snap_id != CEPH_NOSNAP);
5411 /* Get the pool name; we have to make our own copy of this */
5413 pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
5415 rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
5418 pool_name = kstrdup(pool_name, GFP_KERNEL);
5422 /* Fetch the image name; tolerate failure here */
5424 image_name = rbd_dev_image_name(rbd_dev);
5426 rbd_warn(rbd_dev, "unable to get image name");
5428 /* Fetch the snapshot name */
5430 snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
5431 if (IS_ERR(snap_name)) {
5432 ret = PTR_ERR(snap_name);
5436 spec->pool_name = pool_name;
5437 spec->image_name = image_name;
5438 spec->snap_name = snap_name;
5448 static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
5457 struct ceph_snap_context *snapc;
5461 * We'll need room for the seq value (maximum snapshot id),
5462 * snapshot count, and array of that many snapshot ids.
5463 * For now we have a fixed upper limit on the number we're
5464 * prepared to receive.
5466 size = sizeof (__le64) + sizeof (__le32) +
5467 RBD_MAX_SNAP_COUNT * sizeof (__le64);
5468 reply_buf = kzalloc(size, GFP_KERNEL);
5472 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
5473 "rbd", "get_snapcontext", NULL, 0,
5475 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5480 end = reply_buf + ret;
5482 ceph_decode_64_safe(&p, end, seq, out);
5483 ceph_decode_32_safe(&p, end, snap_count, out);
5486 * Make sure the reported number of snapshot ids wouldn't go
5487 * beyond the end of our buffer. But before checking that,
5488 * make sure the computed size of the snapshot context we
5489 * allocate is representable in a size_t.
5491 	if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context)) / sizeof (u64))
5496 if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
5500 snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
5506 for (i = 0; i < snap_count; i++)
5507 snapc->snaps[i] = ceph_decode_64(&p);
5509 ceph_put_snap_context(rbd_dev->header.snapc);
5510 rbd_dev->header.snapc = snapc;
5512 dout(" snap context seq = %llu, snap_count = %u\n",
5513 (unsigned long long)seq, (unsigned int)snap_count);
5520 static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
5531 size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
5532 reply_buf = kmalloc(size, GFP_KERNEL);
5534 return ERR_PTR(-ENOMEM);
5536 snapid = cpu_to_le64(snap_id);
5537 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
5538 "rbd", "get_snapshot_name",
5539 &snapid, sizeof (snapid),
5541 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5543 snap_name = ERR_PTR(ret);
5548 end = reply_buf + ret;
5549 snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
5550 if (IS_ERR(snap_name))
5553 dout(" snap_id 0x%016llx snap_name = %s\n",
5554 (unsigned long long)snap_id, snap_name);
5561 static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
5563 bool first_time = rbd_dev->header.object_prefix == NULL;
5566 ret = rbd_dev_v2_image_size(rbd_dev);
5571 ret = rbd_dev_v2_header_onetime(rbd_dev);
5576 ret = rbd_dev_v2_snap_context(rbd_dev);
5577 if (ret && first_time) {
5578 kfree(rbd_dev->header.object_prefix);
5579 rbd_dev->header.object_prefix = NULL;
5585 static int rbd_dev_header_info(struct rbd_device *rbd_dev)
5587 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
5589 if (rbd_dev->image_format == 1)
5590 return rbd_dev_v1_header_info(rbd_dev);
5592 return rbd_dev_v2_header_info(rbd_dev);
5596 * Skips over white space at *buf, and updates *buf to point to the
5597 * first found non-space character (if any). Returns the length of
5598 * the token (string of non-white space characters) found. Note
5599 * that *buf must be terminated with '\0'.
5601 static inline size_t next_token(const char **buf)
5604 * These are the characters that produce nonzero for
5605 * isspace() in the "C" and "POSIX" locales.
5607 const char *spaces = " \f\n\r\t\v";
5609 *buf += strspn(*buf, spaces); /* Find start of token */
5611 return strcspn(*buf, spaces); /* Return token length */
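/*
 * Example (added): with *buf pointing at "  rbd foo", next_token()
 * advances *buf past the leading spaces to "rbd foo" and returns 3;
 * dup_token() below would then duplicate "rbd" and advance *buf past
 * the token.
 */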
5615 * Finds the next token in *buf, dynamically allocates a buffer big
5616 * enough to hold a copy of it, and copies the token into the new
5617 * buffer. The copy is guaranteed to be terminated with '\0'. Note
5618 * that a duplicate buffer is created even for a zero-length token.
5620 * Returns a pointer to the newly-allocated duplicate, or a null
5621 * pointer if memory for the duplicate was not available. If
5622 * the lenp argument is a non-null pointer, the length of the token
5623 * (not including the '\0') is returned in *lenp.
5625 * If successful, the *buf pointer will be updated to point beyond
5626 * the end of the found token.
5628 * Note: uses GFP_KERNEL for allocation.
5630 static inline char *dup_token(const char **buf, size_t *lenp)
5635 len = next_token(buf);
5636 dup = kmemdup(*buf, len + 1, GFP_KERNEL);
5639 *(dup + len) = '\0';
5649 * Parse the options provided for an "rbd add" (i.e., rbd image
5650 * mapping) request. These arrive via a write to /sys/bus/rbd/add,
5651 * and the data written is passed here via a NUL-terminated buffer.
5652 * Returns 0 if successful or an error code otherwise.
5654  * The information extracted from these options is recorded in
5655  * the other parameters which return dynamically-allocated
5656  * structures:
5657  *  ceph_opts
5658  *      The address of a pointer that will refer to a ceph options
5659  *      structure.  Caller must release the returned pointer using
5660  *      ceph_destroy_options() when it is no longer needed.
5661  *  rbd_opts
5662  *      Address of an rbd options pointer.  Fully initialized by
5663  *      this function; caller must release with kfree().
5664  *  rbd_spec
5665  *      Address of an rbd image specification pointer.  Fully
5666  *      initialized by this function based on parsed options.
5667  *      Caller must release with rbd_spec_put().
5669  * The options passed take this form:
5670  *  <mon_addrs> <options> <pool_name> <image_name> [<snap_id>]
5671  * where:
5672  *  <mon_addrs>
5673  *      A comma-separated list of one or more monitor addresses.
5674  *      A monitor address is an ip address, optionally followed
5675  *      by a port number (separated by a colon).
5676  *        I.e.:  ip1[:port1][,ip2[:port2]...]
5677  *  <options>
5678  *      A comma-separated list of ceph and/or rbd options.
5679  *  <pool_name>
5680  *      The name of the rados pool containing the rbd image.
5681  *  <image_name>
5682  *      The name of the image in that pool to map.
5683  *  <snap_id>
5684  *      An optional snapshot id.  If provided, the mapping will
5685  *      present data from the image at the time that snapshot was
5686  *      created.  The image head is used if no snapshot id is
5687  *      provided.  Snapshot mappings are always read-only.
5689 static int rbd_add_parse_args(const char *buf,
5690 struct ceph_options **ceph_opts,
5691 struct rbd_options **opts,
5692 struct rbd_spec **rbd_spec)
5696 const char *mon_addrs;
5698 size_t mon_addrs_size;
5699 struct rbd_spec *spec = NULL;
5700 struct rbd_options *rbd_opts = NULL;
5701 struct ceph_options *copts;
5704 /* The first four tokens are required */
5706 len = next_token(&buf);
5708 rbd_warn(NULL, "no monitor address(es) provided");
5712 mon_addrs_size = len + 1;
5716 options = dup_token(&buf, NULL);
5720 rbd_warn(NULL, "no options provided");
5724 spec = rbd_spec_alloc();
5728 spec->pool_name = dup_token(&buf, NULL);
5729 if (!spec->pool_name)
5731 if (!*spec->pool_name) {
5732 rbd_warn(NULL, "no pool name provided");
5736 spec->image_name = dup_token(&buf, NULL);
5737 if (!spec->image_name)
5739 if (!*spec->image_name) {
5740 rbd_warn(NULL, "no image name provided");
5745 * Snapshot name is optional; default is to use "-"
5746 * (indicating the head/no snapshot).
5748 len = next_token(&buf);
5750 buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
5751 len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
5752 } else if (len > RBD_MAX_SNAP_NAME_LEN) {
5753 ret = -ENAMETOOLONG;
5756 snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
5759 *(snap_name + len) = '\0';
5760 spec->snap_name = snap_name;
5762 /* Initialize all rbd options to the defaults */
5764 rbd_opts = kzalloc(sizeof (*rbd_opts), GFP_KERNEL);
5768 rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;
5769 rbd_opts->queue_depth = RBD_QUEUE_DEPTH_DEFAULT;
5770 rbd_opts->lock_on_read = RBD_LOCK_ON_READ_DEFAULT;
5772 copts = ceph_parse_options(options, mon_addrs,
5773 mon_addrs + mon_addrs_size - 1,
5774 parse_rbd_opts_token, rbd_opts);
5775 if (IS_ERR(copts)) {
5776 ret = PTR_ERR(copts);
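/*
 * Example add string (added; the monitor address, credentials and names
 * are illustrative only), matching the format documented above:
 *
 *   $ echo "1.2.3.4:6789 name=admin,secret=<key> rbd myimage mysnap" \
 *       > /sys/bus/rbd/add
 *
 * maps snapshot "mysnap" of image "myimage" in pool "rbd".
 */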
5797 * Return pool id (>= 0) or a negative error code.
5799 static int rbd_add_get_pool_id(struct rbd_client *rbdc, const char *pool_name)
5801 struct ceph_options *opts = rbdc->client->options;
5807 ret = ceph_pg_poolid_by_name(rbdc->client->osdc.osdmap, pool_name);
5808 if (ret == -ENOENT && tries++ < 1) {
5809 ret = ceph_monc_get_version(&rbdc->client->monc, "osdmap",
5814 if (rbdc->client->osdc.osdmap->epoch < newest_epoch) {
5815 ceph_osdc_maybe_request_map(&rbdc->client->osdc);
5816 (void) ceph_monc_wait_osdmap(&rbdc->client->monc,
5818 opts->mount_timeout);
5821 /* the osdmap we have is new enough */
5830 * An rbd format 2 image has a unique identifier, distinct from the
5831 * name given to it by the user. Internally, that identifier is
5832 * what's used to specify the names of objects related to the image.
5834 * A special "rbd id" object is used to map an rbd image name to its
5835 * id. If that object doesn't exist, then there is no v2 rbd image
5836 * with the supplied name.
5838 * This function will record the given rbd_dev's image_id field if
5839 * it can be determined, and in that case will return 0. If any
5840 * errors occur a negative errno will be returned and the rbd_dev's
5841 * image_id field will be unchanged (and should be NULL).
5843 static int rbd_dev_image_id(struct rbd_device *rbd_dev)
5852 * When probing a parent image, the image id is already
5853 * known (and the image name likely is not). There's no
5854 * need to fetch the image id again in this case. We
5855 * do still need to set the image format though.
5857 if (rbd_dev->spec->image_id) {
5858 rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;
5864 * First, see if the format 2 image id file exists, and if
5865 * so, get the image's persistent id from it.
5867 size = sizeof (RBD_ID_PREFIX) + strlen(rbd_dev->spec->image_name);
5868 object_name = kmalloc(size, GFP_NOIO);
5871 sprintf(object_name, "%s%s", RBD_ID_PREFIX, rbd_dev->spec->image_name);
5872 dout("rbd id object name is %s\n", object_name);
5874 /* Response will be an encoded string, which includes a length */
5876 size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
5877 response = kzalloc(size, GFP_NOIO);
5883 /* If it doesn't exist we'll assume it's a format 1 image */
5885 ret = rbd_obj_method_sync(rbd_dev, object_name,
5886 "rbd", "get_id", NULL, 0,
5887 response, RBD_IMAGE_ID_LEN_MAX);
5888 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5889 if (ret == -ENOENT) {
5890 image_id = kstrdup("", GFP_KERNEL);
5891 ret = image_id ? 0 : -ENOMEM;
5893 rbd_dev->image_format = 1;
5894 } else if (ret >= 0) {
5897 image_id = ceph_extract_encoded_string(&p, p + ret,
5899 ret = PTR_ERR_OR_ZERO(image_id);
5901 rbd_dev->image_format = 2;
5905 rbd_dev->spec->image_id = image_id;
5906 dout("image_id is %s\n", image_id);
5916  * Undo whatever state changes are made by the v1 or v2 header info calls.
5919 static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
5921 struct rbd_image_header *header;
5923 rbd_dev_parent_put(rbd_dev);
5925 /* Free dynamic fields from the header, then zero it out */
5927 header = &rbd_dev->header;
5928 ceph_put_snap_context(header->snapc);
5929 kfree(header->snap_sizes);
5930 kfree(header->snap_names);
5931 kfree(header->object_prefix);
5932 memset(header, 0, sizeof (*header));
5935 static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
5939 ret = rbd_dev_v2_object_prefix(rbd_dev);
5944  * Get and check the features for the image.  Currently the
5945  * features are assumed to never change.
5947 ret = rbd_dev_v2_features(rbd_dev);
5951 /* If the image supports fancy striping, get its parameters */
5953 if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
5954 ret = rbd_dev_v2_striping_info(rbd_dev);
5958 	/* No support for crypto or compression type in format 2 images */
5962 rbd_dev->header.features = 0;
5963 kfree(rbd_dev->header.object_prefix);
5964 rbd_dev->header.object_prefix = NULL;
5970 * @depth is rbd_dev_image_probe() -> rbd_dev_probe_parent() ->
5971 * rbd_dev_image_probe() recursion depth, which means it's also the
5972 * length of the already discovered part of the parent chain.
5974 static int rbd_dev_probe_parent(struct rbd_device *rbd_dev, int depth)
5976 struct rbd_device *parent = NULL;
5979 if (!rbd_dev->parent_spec)
5982 if (++depth > RBD_MAX_PARENT_CHAIN_LEN) {
5983 pr_info("parent chain is too long (%d)\n", depth);
5988 parent = __rbd_dev_create(rbd_dev->rbd_client, rbd_dev->parent_spec);
5995 * Images related by parent/child relationships always share
5996 * rbd_client and spec/parent_spec, so bump their refcounts.
5998 __rbd_get_client(rbd_dev->rbd_client);
5999 rbd_spec_get(rbd_dev->parent_spec);
6001 ret = rbd_dev_image_probe(parent, depth);
6005 rbd_dev->parent = parent;
6006 atomic_set(&rbd_dev->parent_ref, 1);
6010 rbd_dev_unparent(rbd_dev);
6011 rbd_dev_destroy(parent);
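/*
 * Added note: probing recurses through rbd_dev_image_probe() once per
 * layer, so a chain
 *
 *   mapped image -> parent -> grandparent -> ...
 *
 * is discovered one ancestor at a time; anything deeper than
 * RBD_MAX_PARENT_CHAIN_LEN layers is refused above.
 */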
6016  * rbd_dev->header_rwsem must be locked for write and will be unlocked upon return.
6019 static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
6023 /* Record our major and minor device numbers. */
6025 if (!single_major) {
6026 ret = register_blkdev(0, rbd_dev->name);
6028 goto err_out_unlock;
6030 rbd_dev->major = ret;
6033 rbd_dev->major = rbd_major;
6034 rbd_dev->minor = rbd_dev_id_to_minor(rbd_dev->dev_id);
6037 /* Set up the blkdev mapping. */
6039 ret = rbd_init_disk(rbd_dev);
6041 goto err_out_blkdev;
6043 ret = rbd_dev_mapping_set(rbd_dev);
6047 set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
6048 set_disk_ro(rbd_dev->disk, rbd_dev->mapping.read_only);
6050 dev_set_name(&rbd_dev->dev, "%d", rbd_dev->dev_id);
6051 ret = device_add(&rbd_dev->dev);
6053 goto err_out_mapping;
6055 /* Everything's ready. Announce the disk to the world. */
6057 set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
6058 up_write(&rbd_dev->header_rwsem);
6060 spin_lock(&rbd_dev_list_lock);
6061 list_add_tail(&rbd_dev->node, &rbd_dev_list);
6062 spin_unlock(&rbd_dev_list_lock);
6064 add_disk(rbd_dev->disk);
6065 pr_info("%s: capacity %llu features 0x%llx\n", rbd_dev->disk->disk_name,
6066 (unsigned long long)get_capacity(rbd_dev->disk) << SECTOR_SHIFT,
6067 rbd_dev->header.features);
6072 rbd_dev_mapping_clear(rbd_dev);
6074 rbd_free_disk(rbd_dev);
6077 unregister_blkdev(rbd_dev->major, rbd_dev->name);
6079 up_write(&rbd_dev->header_rwsem);
6083 static int rbd_dev_header_name(struct rbd_device *rbd_dev)
6085 struct rbd_spec *spec = rbd_dev->spec;
6088 /* Record the header object name for this rbd image. */
6090 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
6092 rbd_dev->header_oloc.pool = rbd_dev->layout.pool_id;
6093 if (rbd_dev->image_format == 1)
6094 ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
6095 spec->image_name, RBD_SUFFIX);
6097 ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
6098 RBD_HEADER_PREFIX, spec->image_id);
6103 static void rbd_dev_image_release(struct rbd_device *rbd_dev)
6105 rbd_dev_unprobe(rbd_dev);
6106 rbd_dev->image_format = 0;
6107 kfree(rbd_dev->spec->image_id);
6108 rbd_dev->spec->image_id = NULL;
6110 rbd_dev_destroy(rbd_dev);
6114 * Probe for the existence of the header object for the given rbd
6115 * device. If this image is the one being mapped (i.e., not a
6116 * parent), initiate a watch on its header object before using that
6117 * object to get detailed information about the rbd image.
6119 static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
6124 * Get the id from the image id object. Unless there's an
6125 * error, rbd_dev->spec->image_id will be filled in with
6126 * a dynamically-allocated string, and rbd_dev->image_format
6127 * will be set to either 1 or 2.
6129 ret = rbd_dev_image_id(rbd_dev);
6133 ret = rbd_dev_header_name(rbd_dev);
6135 goto err_out_format;
6138 ret = rbd_register_watch(rbd_dev);
6141 pr_info("image %s/%s does not exist\n",
6142 rbd_dev->spec->pool_name,
6143 rbd_dev->spec->image_name);
6144 goto err_out_format;
6148 ret = rbd_dev_header_info(rbd_dev);
6153 * If this image is the one being mapped, we have pool name and
6154 * id, image name and id, and snap name - need to fill snap id.
6155 * Otherwise this is a parent image, identified by pool, image
6156 * and snap ids - need to fill in names for those ids.
6159 ret = rbd_spec_fill_snap_id(rbd_dev);
6161 ret = rbd_spec_fill_names(rbd_dev);
6164 pr_info("snap %s/%s@%s does not exist\n",
6165 rbd_dev->spec->pool_name,
6166 rbd_dev->spec->image_name,
6167 rbd_dev->spec->snap_name);
6171 if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
6172 ret = rbd_dev_v2_parent_info(rbd_dev);
6177 * Need to warn users if this image is the one being
6178 * mapped and has a parent.
6180 if (!depth && rbd_dev->parent_spec)
6182 "WARNING: kernel layering is EXPERIMENTAL!");
6185 ret = rbd_dev_probe_parent(rbd_dev, depth);
6189 dout("discovered format %u image, header name is %s\n",
6190 rbd_dev->image_format, rbd_dev->header_oid.name);
6194 rbd_dev_unprobe(rbd_dev);
6197 rbd_unregister_watch(rbd_dev);
6199 rbd_dev->image_format = 0;
6200 kfree(rbd_dev->spec->image_id);
6201 rbd_dev->spec->image_id = NULL;
6205 static ssize_t do_rbd_add(struct bus_type *bus,
6209 struct rbd_device *rbd_dev = NULL;
6210 struct ceph_options *ceph_opts = NULL;
6211 struct rbd_options *rbd_opts = NULL;
6212 struct rbd_spec *spec = NULL;
6213 struct rbd_client *rbdc;
6217 if (!try_module_get(THIS_MODULE))
6220 /* parse add command */
6221 rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
6225 rbdc = rbd_get_client(ceph_opts);
6232 rc = rbd_add_get_pool_id(rbdc, spec->pool_name);
6235 pr_info("pool %s does not exist\n", spec->pool_name);
6236 goto err_out_client;
6238 spec->pool_id = (u64)rc;
6240 rbd_dev = rbd_dev_create(rbdc, spec, rbd_opts);
6243 goto err_out_client;
6245 rbdc = NULL; /* rbd_dev now owns this */
6246 spec = NULL; /* rbd_dev now owns this */
6247 rbd_opts = NULL; /* rbd_dev now owns this */
6249 rbd_dev->config_info = kstrdup(buf, GFP_KERNEL);
6250 if (!rbd_dev->config_info) {
6252 goto err_out_rbd_dev;
6255 down_write(&rbd_dev->header_rwsem);
6256 rc = rbd_dev_image_probe(rbd_dev, 0);
6258 up_write(&rbd_dev->header_rwsem);
6259 goto err_out_rbd_dev;
6262 /* If we are mapping a snapshot it must be marked read-only */
6264 read_only = rbd_dev->opts->read_only;
6265 if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
6267 rbd_dev->mapping.read_only = read_only;
6269 rc = rbd_dev_device_setup(rbd_dev);
6272 * rbd_unregister_watch() can't be moved into
6273 * rbd_dev_image_release() without refactoring, see
6274 * commit 1f3ef78861ac.
6276 rbd_unregister_watch(rbd_dev);
6277 rbd_dev_image_release(rbd_dev);
6283 module_put(THIS_MODULE);
6287 rbd_dev_destroy(rbd_dev);
6289 rbd_put_client(rbdc);
6296 static ssize_t rbd_add(struct bus_type *bus,
6303 return do_rbd_add(bus, buf, count);
6306 static ssize_t rbd_add_single_major(struct bus_type *bus,
6310 return do_rbd_add(bus, buf, count);
6313 static void rbd_dev_device_release(struct rbd_device *rbd_dev)
6315 rbd_free_disk(rbd_dev);
6317 spin_lock(&rbd_dev_list_lock);
6318 list_del_init(&rbd_dev->node);
6319 spin_unlock(&rbd_dev_list_lock);
6321 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
6322 device_del(&rbd_dev->dev);
6323 rbd_dev_mapping_clear(rbd_dev);
6325 unregister_blkdev(rbd_dev->major, rbd_dev->name);
6328 static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
6330 while (rbd_dev->parent) {
6331 struct rbd_device *first = rbd_dev;
6332 struct rbd_device *second = first->parent;
6333 struct rbd_device *third;
6336  * Follow to the parent with no grandparent and remove it.
6339 while (second && (third = second->parent)) {
6344 rbd_dev_image_release(second);
6345 first->parent = NULL;
6346 first->parent_overlap = 0;
6348 rbd_assert(first->parent_spec);
6349 rbd_spec_put(first->parent_spec);
6350 first->parent_spec = NULL;
6354 static ssize_t do_rbd_remove(struct bus_type *bus,
6358 struct rbd_device *rbd_dev = NULL;
6359 struct list_head *tmp;
6362 bool already = false;
6368 sscanf(buf, "%d %5s", &dev_id, opt_buf);
6370 pr_err("dev_id out of range\n");
6373 if (opt_buf[0] != '\0') {
6374 if (!strcmp(opt_buf, "force")) {
6377 pr_err("bad remove option at '%s'\n", opt_buf);
6383 spin_lock(&rbd_dev_list_lock);
6384 list_for_each(tmp, &rbd_dev_list) {
6385 rbd_dev = list_entry(tmp, struct rbd_device, node);
6386 if (rbd_dev->dev_id == dev_id) {
6392 spin_lock_irq(&rbd_dev->lock);
6393 if (rbd_dev->open_count && !force)
6396 already = test_and_set_bit(RBD_DEV_FLAG_REMOVING,
6398 spin_unlock_irq(&rbd_dev->lock);
6400 spin_unlock(&rbd_dev_list_lock);
6401 if (ret < 0 || already)
6406 * Prevent new IO from being queued and wait for existing
6407 * IO to complete/fail.
6409 blk_mq_freeze_queue(rbd_dev->disk->queue);
6410 blk_set_queue_dying(rbd_dev->disk->queue);
6413 down_write(&rbd_dev->lock_rwsem);
6414 if (__rbd_is_lock_owner(rbd_dev))
6415 rbd_unlock(rbd_dev);
6416 up_write(&rbd_dev->lock_rwsem);
6417 rbd_unregister_watch(rbd_dev);
6420 * Don't free anything from rbd_dev->disk until after all
6421 * notifies are completely processed. Otherwise
6422 * rbd_bus_del_dev() will race with rbd_watch_cb(), resulting
6423 * in a potential use after free of rbd_dev->disk or rbd_dev.
6425 rbd_dev_device_release(rbd_dev);
6426 rbd_dev_image_release(rbd_dev);
6431 static ssize_t rbd_remove(struct bus_type *bus,
6438 return do_rbd_remove(bus, buf, count);
6441 static ssize_t rbd_remove_single_major(struct bus_type *bus,
6445 return do_rbd_remove(bus, buf, count);
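/*
 * Usage sketch (added): removal is requested by writing the device id,
 * optionally followed by "force", to the bus "remove" attribute:
 *
 *   # echo 0 > /sys/bus/rbd/remove
 *   # echo "0 force" > /sys/bus/rbd/remove
 *
 * "force" overrides the open_count check in do_rbd_remove() above.
 */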
6449 * create control files in sysfs
6452 static int rbd_sysfs_init(void)
6456 ret = device_register(&rbd_root_dev);
6460 ret = bus_register(&rbd_bus_type);
6462 device_unregister(&rbd_root_dev);
6467 static void rbd_sysfs_cleanup(void)
6469 bus_unregister(&rbd_bus_type);
6470 device_unregister(&rbd_root_dev);
6473 static int rbd_slab_init(void)
6475 rbd_assert(!rbd_img_request_cache);
6476 rbd_img_request_cache = KMEM_CACHE(rbd_img_request, 0);
6477 if (!rbd_img_request_cache)
6480 rbd_assert(!rbd_obj_request_cache);
6481 rbd_obj_request_cache = KMEM_CACHE(rbd_obj_request, 0);
6482 if (!rbd_obj_request_cache)
6485 rbd_assert(!rbd_segment_name_cache);
6486 rbd_segment_name_cache = kmem_cache_create("rbd_segment_name",
6487 CEPH_MAX_OID_NAME_LEN + 1, 1, 0, NULL);
6488 if (rbd_segment_name_cache)
6491 kmem_cache_destroy(rbd_obj_request_cache);
6492 rbd_obj_request_cache = NULL;
6494 kmem_cache_destroy(rbd_img_request_cache);
6495 rbd_img_request_cache = NULL;
6500 static void rbd_slab_exit(void)
6502 rbd_assert(rbd_segment_name_cache);
6503 kmem_cache_destroy(rbd_segment_name_cache);
6504 rbd_segment_name_cache = NULL;
6506 rbd_assert(rbd_obj_request_cache);
6507 kmem_cache_destroy(rbd_obj_request_cache);
6508 rbd_obj_request_cache = NULL;
6510 rbd_assert(rbd_img_request_cache);
6511 kmem_cache_destroy(rbd_img_request_cache);
6512 rbd_img_request_cache = NULL;
6515 static int __init rbd_init(void)
6519 if (!libceph_compatible(NULL)) {
6520 rbd_warn(NULL, "libceph incompatibility (quitting)");
6524 rc = rbd_slab_init();
6529 * The number of active work items is limited by the number of
6530 * rbd devices * queue depth, so leave @max_active at default.
6532 rbd_wq = alloc_workqueue(RBD_DRV_NAME, WQ_MEM_RECLAIM, 0);
6539 rbd_major = register_blkdev(0, RBD_DRV_NAME);
6540 if (rbd_major < 0) {
6546 rc = rbd_sysfs_init();
6548 goto err_out_blkdev;
6551 pr_info("loaded (major %d)\n", rbd_major);
6553 pr_info("loaded\n");
6559 unregister_blkdev(rbd_major, RBD_DRV_NAME);
6561 destroy_workqueue(rbd_wq);
6567 static void __exit rbd_exit(void)
6569 ida_destroy(&rbd_dev_id_ida);
6570 rbd_sysfs_cleanup();
6572 unregister_blkdev(rbd_major, RBD_DRV_NAME);
6573 destroy_workqueue(rbd_wq);
6577 module_init(rbd_init);
6578 module_exit(rbd_exit);
6580 MODULE_AUTHOR("Alex Elder <elder@inktank.com>");
6581 MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
6582 MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
6583 /* following authorship retained from original osdblk.c */
6584 MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");
6586 MODULE_DESCRIPTION("RADOS Block Device (RBD) driver");
6587 MODULE_LICENSE("GPL");