/*
   rbd.c -- Export ceph rados objects as a Linux block device

   based on drivers/block/osdblk.c:

   Copyright 2009 Red Hat, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

   For usage instructions, please refer to:

		 Documentation/ABI/testing/sysfs-bus-rbd

 */
#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/cls_lock_client.h>
#include <linux/ceph/striper.h>
#include <linux/ceph/decode.h>
#include <linux/parser.h>
#include <linux/bsearch.h>

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/blk-mq.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/workqueue.h>

#include "rbd_types.h"

#define RBD_DEBUG	/* Activate rbd_assert() calls */
/*
 * Increment the given counter and return its updated value.
 * If the counter is already 0 it will not be incremented.
 * If the counter is already at its maximum value returns
 * -EINVAL without updating it.
 */
static int atomic_inc_return_safe(atomic_t *v)
{
	unsigned int counter;

	counter = (unsigned int)atomic_fetch_add_unless(v, 1, 0);
	if (counter <= (unsigned int)INT_MAX)
		return (int)counter;

	atomic_dec(v);

	return -EINVAL;
}

/* Decrement the counter.  Return the resulting value, or -EINVAL */
static int atomic_dec_return_safe(atomic_t *v)
{
	int counter;

	counter = atomic_dec_return(v);
	if (counter >= 0)
		return counter;

	atomic_inc(v);

	return -EINVAL;
}
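/*
 * Together these form a saturating counter: rbd_dev->parent_ref below
 * relies on the -EINVAL returns -- rbd_dev_parent_get() reports it as
 * a "parent reference overflow" and rbd_dev_parent_put() as an
 * underflow (see those functions for the exact usage).
 */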
#define RBD_DRV_NAME "rbd"

#define RBD_MINORS_PER_MAJOR		256
#define RBD_SINGLE_MAJOR_PART_SHIFT	4

#define RBD_MAX_PARENT_CHAIN_LEN	16

#define RBD_SNAP_DEV_NAME_PREFIX	"snap_"
#define RBD_MAX_SNAP_NAME_LEN	\
			(NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))

#define RBD_MAX_SNAP_COUNT	510	/* allows max snapc to fit in 4KB */
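/*
 * The 510 limit presumably comes from the encoded snapshot context:
 * an 8-byte seq plus a 4-byte count plus 510 8-byte snapshot ids is
 * 4092 bytes, just under one 4KB page.
 */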
#define RBD_SNAP_HEAD_NAME	"-"

#define	BAD_SNAP_INDEX	U32_MAX		/* invalid index into snap array */

/* This allows a single page to hold an image name sent by OSD */
#define RBD_IMAGE_NAME_LEN_MAX	(PAGE_SIZE - sizeof (__le32) - 1)
#define RBD_IMAGE_ID_LEN_MAX	64

#define RBD_OBJ_PREFIX_LEN_MAX	64

#define RBD_NOTIFY_TIMEOUT	5	/* seconds */
#define RBD_RETRY_DELAY		msecs_to_jiffies(1000)
/* Feature bits */

#define RBD_FEATURE_LAYERING		(1ULL<<0)
#define RBD_FEATURE_STRIPINGV2		(1ULL<<1)
#define RBD_FEATURE_EXCLUSIVE_LOCK	(1ULL<<2)
#define RBD_FEATURE_OBJECT_MAP		(1ULL<<3)
#define RBD_FEATURE_FAST_DIFF		(1ULL<<4)
#define RBD_FEATURE_DEEP_FLATTEN	(1ULL<<5)
#define RBD_FEATURE_DATA_POOL		(1ULL<<7)
#define RBD_FEATURE_OPERATIONS		(1ULL<<8)

#define RBD_FEATURES_ALL	(RBD_FEATURE_LAYERING |		\
				 RBD_FEATURE_STRIPINGV2 |	\
				 RBD_FEATURE_EXCLUSIVE_LOCK |	\
				 RBD_FEATURE_OBJECT_MAP |	\
				 RBD_FEATURE_FAST_DIFF |	\
				 RBD_FEATURE_DEEP_FLATTEN |	\
				 RBD_FEATURE_DATA_POOL |	\
				 RBD_FEATURE_OPERATIONS)

/* Features supported by this (client software) implementation. */

#define RBD_FEATURES_SUPPORTED	(RBD_FEATURES_ALL)
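/*
 * Sketch of how this mask is meant to be used (the v2 feature probe
 * elsewhere in this file does roughly this):
 *
 *	if (features & ~RBD_FEATURES_SUPPORTED) {
 *		rbd_warn(rbd_dev, "image uses unsupported features: 0x%llx",
 *			 features & ~RBD_FEATURES_SUPPORTED);
 *		return -ENXIO;
 *	}
 */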
/*
 * An RBD device name will be "rbd#", where the "rbd" comes from
 * RBD_DRV_NAME above, and # is a unique integer identifier.
 */
#define DEV_NAME_LEN		32
/*
 * block device image metadata (in-memory version)
 */
struct rbd_image_header {
	/* These six fields never change for a given rbd image */
	char *object_prefix;
	__u8 obj_order;
	u64 stripe_unit;
	u64 stripe_count;
	s64 data_pool_id;
	u64 features;		/* Might be changeable someday? */

	/* The remaining fields need to be updated occasionally */
	u64 image_size;
	struct ceph_snap_context *snapc;
	char *snap_names;	/* format 1 only */
	u64 *snap_sizes;	/* format 1 only */
};
/*
 * An rbd image specification.
 *
 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
 * identify an image.  Each rbd_dev structure includes a pointer to
 * an rbd_spec structure that encapsulates this identity.
 *
 * Each of the id's in an rbd_spec has an associated name.  For a
 * user-mapped image, the names are supplied and the id's associated
 * with them are looked up.  For a layered image, a parent image is
 * defined by the tuple, and the names are looked up.
 *
 * An rbd_dev structure contains a parent_spec pointer which is
 * non-null if the image it represents is a child in a layered
 * image.  This pointer will refer to the rbd_spec structure used
 * by the parent rbd_dev for its own identity (i.e., the structure
 * is shared between the parent and child).
 *
 * Since these structures are populated once, during the discovery
 * phase of image construction, they are effectively immutable so
 * we make no effort to synchronize access to them.
 *
 * Note that code herein does not assume the image name is known (it
 * could be a null pointer).
 */
struct rbd_spec {
	u64		pool_id;
	const char	*pool_name;
	const char	*pool_ns;	/* NULL if default, never "" */

	const char	*image_id;
	const char	*image_name;

	u64		snap_id;
	const char	*snap_name;

	struct kref	kref;
};
/*
 * An instance of the client.  Multiple devices may share an rbd client.
 */
struct rbd_client {
	struct ceph_client	*client;
	struct kref		kref;
	struct list_head	node;
};
struct pending_result {
	int			result;		/* first nonzero result */
	int			num_pending;
};

struct rbd_img_request;
enum obj_request_type {
	OBJ_REQUEST_NODATA = 1,
	OBJ_REQUEST_BIO,	/* pointer into provided bio (list) */
	OBJ_REQUEST_BVECS,	/* pointer into provided bio_vec array */
	OBJ_REQUEST_OWN_BVECS,	/* private bio_vec array, doesn't own pages */
};

enum obj_operation_type {
	OBJ_OP_READ = 1,
	OBJ_OP_WRITE,
	OBJ_OP_DISCARD,
	OBJ_OP_ZEROOUT,
};
#define RBD_OBJ_FLAG_DELETION			(1U << 0)
#define RBD_OBJ_FLAG_COPYUP_ENABLED		(1U << 1)
#define RBD_OBJ_FLAG_COPYUP_ZEROS		(1U << 2)
#define RBD_OBJ_FLAG_MAY_EXIST			(1U << 3)
#define RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT	(1U << 4)
enum rbd_obj_read_state {
	RBD_OBJ_READ_START = 1,
	RBD_OBJ_READ_OBJECT,
	RBD_OBJ_READ_PARENT,
};
/*
 * Writes go through the following state machine to deal with
 * layering:
 *
 *            . . . . . RBD_OBJ_WRITE_GUARD. . . . . . . . . . . . . .
 *            .                 |                                    .
 *            .                 v                                    .
 *            .    RBD_OBJ_WRITE_READ_FROM_PARENT. . .               .
 *            .                 |                    .               .
 *            .                 v                    v (deep-copyup  .
 *  (image    .   RBD_OBJ_WRITE_COPYUP_EMPTY_SNAPC   .  not needed)  .
 * flattened) .                 |                    .               .
 *            .                 v                    .               .
 *            . . . .RBD_OBJ_WRITE_COPYUP_OPS. . . . .      (copyup  .
 *                              |                        .  read from.
 *                              v                        v  parent)  .
 *                             done . . . . . . . . . . . . . . . .  .
 *                              ^                                    .
 *                              |                                    .
 *                     RBD_OBJ_WRITE_FLAT. . . . . . . . . . . . . . .
 *
 * Writes start in RBD_OBJ_WRITE_GUARD or _FLAT, depending on whether
 * assert_exists guard is needed or not (in some cases it's not needed
 * even if there is a parent).
 */
enum rbd_obj_write_state {
	RBD_OBJ_WRITE_START = 1,
	RBD_OBJ_WRITE_PRE_OBJECT_MAP,
	RBD_OBJ_WRITE_OBJECT,
	__RBD_OBJ_WRITE_COPYUP,
	RBD_OBJ_WRITE_COPYUP,
	RBD_OBJ_WRITE_POST_OBJECT_MAP,
};
enum rbd_obj_copyup_state {
	RBD_OBJ_COPYUP_START = 1,
	RBD_OBJ_COPYUP_READ_PARENT,
	__RBD_OBJ_COPYUP_OBJECT_MAPS,
	RBD_OBJ_COPYUP_OBJECT_MAPS,
	__RBD_OBJ_COPYUP_WRITE_OBJECT,
	RBD_OBJ_COPYUP_WRITE_OBJECT,
};
struct rbd_obj_request {
	struct ceph_object_extent ex;
	unsigned int		flags;	/* RBD_OBJ_FLAG_* */
	union {
		enum rbd_obj_read_state	 read_state;	/* for reads */
		enum rbd_obj_write_state write_state;	/* for writes */
	};

	struct rbd_img_request	*img_request;
	struct ceph_file_extent	*img_extents;
	u32			num_img_extents;

	union {
		struct ceph_bio_iter	bio_pos;
		struct {
			struct ceph_bvec_iter	bvec_pos;
			u32			bvec_count;
			u32			bvec_idx;
		};
	};

	enum rbd_obj_copyup_state copyup_state;
	struct bio_vec		*copyup_bvecs;
	u32			copyup_bvec_count;

	struct list_head	osd_reqs;	/* w/ r_private_item */

	struct mutex		state_mutex;
	struct pending_result	pending;
	struct kref		kref;
};
enum img_req_flags {
	IMG_REQ_CHILD,		/* initiator: block = 0, child image = 1 */
	IMG_REQ_LAYERED,	/* ENOENT handling: normal = 0, layered = 1 */
};
enum rbd_img_state {
	RBD_IMG_START = 1,
	RBD_IMG_EXCLUSIVE_LOCK,
	__RBD_IMG_OBJECT_REQUESTS,
	RBD_IMG_OBJECT_REQUESTS,
};
struct rbd_img_request {
	struct rbd_device	*rbd_dev;
	enum obj_operation_type	op_type;
	enum obj_request_type	data_type;
	unsigned long		flags;
	enum rbd_img_state	state;
	union {
		u64			snap_id;	/* for reads */
		struct ceph_snap_context *snapc;	/* for writes */
	};
	union {
		struct request		*rq;		/* block request */
		struct rbd_obj_request	*obj_request;	/* obj req initiator */
	};

	struct list_head	lock_item;
	struct list_head	object_extents;	/* obj_req.ex structs */

	struct mutex		state_mutex;
	struct pending_result	pending;
	struct work_struct	work;
	int			work_result;
	struct kref		kref;
};
#define for_each_obj_request(ireq, oreq) \
	list_for_each_entry(oreq, &(ireq)->object_extents, ex.oe_item)
#define for_each_obj_request_safe(ireq, oreq, n) \
	list_for_each_entry_safe(oreq, n, &(ireq)->object_extents, ex.oe_item)
enum rbd_watch_state {
	RBD_WATCH_STATE_UNREGISTERED,
	RBD_WATCH_STATE_REGISTERED,
	RBD_WATCH_STATE_ERROR,
};

enum rbd_lock_state {
	RBD_LOCK_STATE_UNLOCKED,
	RBD_LOCK_STATE_LOCKED,
	RBD_LOCK_STATE_RELEASING,
};
/* WatchNotify::ClientId */
struct rbd_client_id {
	u64 gid;
	u64 handle;
};

struct rbd_mapping {
	u64			size;
	u64			features;
};

/*
 * a single device
 */
struct rbd_device {
	int			dev_id;		/* blkdev unique id */

	int			major;		/* blkdev assigned major */
	int			minor;
	struct gendisk		*disk;		/* blkdev's gendisk and rq */

	u32			image_format;	/* Either 1 or 2 */
	struct rbd_client	*rbd_client;

	char			name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */

	spinlock_t		lock;		/* queue, flags, open_count */

	struct rbd_image_header	header;
	unsigned long		flags;		/* possibly lock protected */
	struct rbd_spec		*spec;
	struct rbd_options	*opts;
	char			*config_info;	/* add{,_single_major} string */

	struct ceph_object_id	header_oid;
	struct ceph_object_locator header_oloc;

	struct ceph_file_layout	layout;		/* used for all rbd requests */

	struct mutex		watch_mutex;
	enum rbd_watch_state	watch_state;
	struct ceph_osd_linger_request *watch_handle;
	u64			watch_cookie;
	struct delayed_work	watch_dwork;

	struct rw_semaphore	lock_rwsem;
	enum rbd_lock_state	lock_state;
	char			lock_cookie[32];
	struct rbd_client_id	owner_cid;
	struct work_struct	acquired_lock_work;
	struct work_struct	released_lock_work;
	struct delayed_work	lock_dwork;
	struct work_struct	unlock_work;
	spinlock_t		lock_lists_lock;
	struct list_head	acquiring_list;
	struct list_head	running_list;
	struct completion	acquire_wait;
	int			acquire_err;
	struct completion	releasing_wait;

	spinlock_t		object_map_lock;
	u8			*object_map;
	u64			object_map_size;	/* in objects */
	u64			object_map_flags;

	struct workqueue_struct	*task_wq;

	struct rbd_spec		*parent_spec;
	u64			parent_overlap;
	atomic_t		parent_ref;
	struct rbd_device	*parent;

	/* Block layer tags. */
	struct blk_mq_tag_set	tag_set;

	/* protects updating the header */
	struct rw_semaphore	header_rwsem;

	struct rbd_mapping	mapping;

	struct list_head	node;

	/* sysfs related */
	struct device		dev;
	unsigned long		open_count;	/* protected by lock */
};
/*
 * Flag bits for rbd_dev->flags:
 * - REMOVING (which is coupled with rbd_dev->open_count) is protected
 *   by rbd_dev->lock
 */
enum rbd_dev_flags {
	RBD_DEV_FLAG_EXISTS,	/* mapped snapshot has not been deleted */
	RBD_DEV_FLAG_REMOVING,	/* this mapping is being removed */
};
static DEFINE_MUTEX(client_mutex);	/* Serialize client creation */

static LIST_HEAD(rbd_dev_list);    /* devices */
static DEFINE_SPINLOCK(rbd_dev_list_lock);

static LIST_HEAD(rbd_client_list);		/* clients */
static DEFINE_SPINLOCK(rbd_client_list_lock);

/* Slab caches for frequently-allocated structures */

static struct kmem_cache	*rbd_img_request_cache;
static struct kmem_cache	*rbd_obj_request_cache;

static int rbd_major;
static DEFINE_IDA(rbd_dev_id_ida);

static struct workqueue_struct *rbd_wq;
static struct ceph_snap_context rbd_empty_snapc = {
	.nref = REFCOUNT_INIT(1),
};
/*
 * single-major requires >= 0.75 version of userspace rbd utility.
 */
static bool single_major = true;
module_param(single_major, bool, 0444);
MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: true)");
static ssize_t add_store(struct bus_type *bus, const char *buf, size_t count);
static ssize_t remove_store(struct bus_type *bus, const char *buf,
			    size_t count);
static ssize_t add_single_major_store(struct bus_type *bus, const char *buf,
				      size_t count);
static ssize_t remove_single_major_store(struct bus_type *bus, const char *buf,
					 size_t count);
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth);
static int rbd_dev_id_to_minor(int dev_id)
{
	return dev_id << RBD_SINGLE_MAJOR_PART_SHIFT;
}

static int minor_to_rbd_dev_id(int minor)
{
	return minor >> RBD_SINGLE_MAJOR_PART_SHIFT;
}
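/*
 * Example: with RBD_SINGLE_MAJOR_PART_SHIFT == 4, dev_id 2 maps to
 * minor 32 and minors 33..47 are left for partitions (rbd2p1, ...);
 * minor_to_rbd_dev_id() inverts this for any minor in that range.
 */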
static bool __rbd_is_lock_owner(struct rbd_device *rbd_dev)
{
	lockdep_assert_held(&rbd_dev->lock_rwsem);

	return rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED ||
	       rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING;
}

static bool rbd_is_lock_owner(struct rbd_device *rbd_dev)
{
	bool is_lock_owner;

	down_read(&rbd_dev->lock_rwsem);
	is_lock_owner = __rbd_is_lock_owner(rbd_dev);
	up_read(&rbd_dev->lock_rwsem);
	return is_lock_owner;
}
static ssize_t supported_features_show(struct bus_type *bus, char *buf)
{
	return sprintf(buf, "0x%llx\n", RBD_FEATURES_SUPPORTED);
}
static BUS_ATTR_WO(add);
static BUS_ATTR_WO(remove);
static BUS_ATTR_WO(add_single_major);
static BUS_ATTR_WO(remove_single_major);
static BUS_ATTR_RO(supported_features);

static struct attribute *rbd_bus_attrs[] = {
	&bus_attr_add.attr,
	&bus_attr_remove.attr,
	&bus_attr_add_single_major.attr,
	&bus_attr_remove_single_major.attr,
	&bus_attr_supported_features.attr,
	NULL,
};
static umode_t rbd_bus_is_visible(struct kobject *kobj,
				  struct attribute *attr, int index)
{
	if (!single_major &&
	    (attr == &bus_attr_add_single_major.attr ||
	     attr == &bus_attr_remove_single_major.attr))
		return 0;

	return attr->mode;
}
static const struct attribute_group rbd_bus_group = {
	.attrs = rbd_bus_attrs,
	.is_visible = rbd_bus_is_visible,
};
__ATTRIBUTE_GROUPS(rbd_bus);

static struct bus_type rbd_bus_type = {
	.name		= "rbd",
	.bus_groups	= rbd_bus_groups,
};
static void rbd_root_dev_release(struct device *dev)
{
}

static struct device rbd_root_dev = {
	.init_name =    "rbd",
	.release =      rbd_root_dev_release,
};
static __printf(2, 3)
void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	if (!rbd_dev)
		printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
	else if (rbd_dev->disk)
		printk(KERN_WARNING "%s: %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_name)
		printk(KERN_WARNING "%s: image %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_id)
		printk(KERN_WARNING "%s: id %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
	else	/* punt */
		printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
			RBD_DRV_NAME, rbd_dev, &vaf);
	va_end(args);
}
#ifdef RBD_DEBUG
#define rbd_assert(expr)						\
		if (unlikely(!(expr))) {				\
			printk(KERN_ERR "\nAssertion failure in %s() "	\
						"at line %d:\n\n"	\
					"\trbd_assert(%s);\n\n",	\
					__func__, __LINE__, #expr);	\
			BUG();						\
		}
#else /* !RBD_DEBUG */
#  define rbd_assert(expr)	((void) 0)
#endif /* !RBD_DEBUG */
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);

static int rbd_dev_refresh(struct rbd_device *rbd_dev);
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
static int rbd_dev_header_info(struct rbd_device *rbd_dev);
static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev);
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id);
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u8 *order, u64 *snap_size);
static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
		u64 *snap_features);
static int rbd_dev_v2_get_flags(struct rbd_device *rbd_dev);

static void rbd_obj_handle_request(struct rbd_obj_request *obj_req, int result);
static void rbd_img_handle_request(struct rbd_img_request *img_req, int result);
/*
 * Return true if nothing else is pending.
 */
static bool pending_result_dec(struct pending_result *pending, int *result)
{
	rbd_assert(pending->num_pending > 0);

	if (*result && !pending->result)
		pending->result = *result;
	if (--pending->num_pending)
		return false;

	*result = pending->result;
	return true;
}
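/*
 * Usage sketch (illustrative): a state machine arms pending once per
 * outstanding sub-request; completions then funnel through here and
 * only the final one proceeds, carrying the first nonzero result:
 *
 *	if (!pending_result_dec(&obj_req->pending, &result))
 *		return;		// more sub-requests still pending
 *	// all sub-requests done, act on the accumulated result
 */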
static int rbd_open(struct block_device *bdev, fmode_t mode)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	bool removing = false;

	spin_lock_irq(&rbd_dev->lock);
	if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
		removing = true;
	else
		rbd_dev->open_count++;
	spin_unlock_irq(&rbd_dev->lock);
	if (removing)
		return -ENOENT;

	(void) get_device(&rbd_dev->dev);

	return 0;
}
static void rbd_release(struct gendisk *disk, fmode_t mode)
{
	struct rbd_device *rbd_dev = disk->private_data;
	unsigned long open_count_before;

	spin_lock_irq(&rbd_dev->lock);
	open_count_before = rbd_dev->open_count--;
	spin_unlock_irq(&rbd_dev->lock);
	rbd_assert(open_count_before > 0);

	put_device(&rbd_dev->dev);
}
static int rbd_ioctl_set_ro(struct rbd_device *rbd_dev, unsigned long arg)
{
	int ro;

	if (get_user(ro, (int __user *)arg))
		return -EFAULT;

	/* Snapshots can't be marked read-write */
	if (rbd_dev->spec->snap_id != CEPH_NOSNAP && !ro)
		return -EROFS;

	/* Let blkdev_roset() handle it */
	return -ENOTTY;
}
static int rbd_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	int ret;

	switch (cmd) {
	case BLKROSET:
		ret = rbd_ioctl_set_ro(rbd_dev, arg);
		break;
	default:
		ret = -ENOTTY;
	}

	return ret;
}

#ifdef CONFIG_COMPAT
static int rbd_compat_ioctl(struct block_device *bdev, fmode_t mode,
				unsigned int cmd, unsigned long arg)
{
	return rbd_ioctl(bdev, mode, cmd, arg);
}
#endif /* CONFIG_COMPAT */
static const struct block_device_operations rbd_bd_ops = {
	.owner			= THIS_MODULE,
	.open			= rbd_open,
	.release		= rbd_release,
	.ioctl			= rbd_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= rbd_compat_ioctl,
#endif
};
/*
 * Initialize an rbd client instance.  Success or not, this function
 * consumes ceph_opts.  Caller holds client_mutex.
 */
static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;
	int ret = -ENOMEM;

	dout("%s:\n", __func__);
	rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
	if (!rbdc)
		goto out_opt;

	kref_init(&rbdc->kref);
	INIT_LIST_HEAD(&rbdc->node);

	rbdc->client = ceph_create_client(ceph_opts, rbdc);
	if (IS_ERR(rbdc->client))
		goto out_rbdc;
	ceph_opts = NULL; /* Now rbdc->client is responsible for ceph_opts */

	ret = ceph_open_session(rbdc->client);
	if (ret < 0)
		goto out_client;

	spin_lock(&rbd_client_list_lock);
	list_add_tail(&rbdc->node, &rbd_client_list);
	spin_unlock(&rbd_client_list_lock);

	dout("%s: rbdc %p\n", __func__, rbdc);

	return rbdc;
out_client:
	ceph_destroy_client(rbdc->client);
out_rbdc:
	kfree(rbdc);
out_opt:
	if (ceph_opts)
		ceph_destroy_options(ceph_opts);
	dout("%s: error %d\n", __func__, ret);

	return ERR_PTR(ret);
}
static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
{
	kref_get(&rbdc->kref);

	return rbdc;
}
/*
 * Find a ceph client with specific addr and configuration.  If
 * found, bump its reference count.
 */
static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
{
	struct rbd_client *client_node;
	bool found = false;

	if (ceph_opts->flags & CEPH_OPT_NOSHARE)
		return NULL;

	spin_lock(&rbd_client_list_lock);
	list_for_each_entry(client_node, &rbd_client_list, node) {
		if (!ceph_compare_options(ceph_opts, client_node->client)) {
			__rbd_get_client(client_node);

			found = true;
			break;
		}
	}
	spin_unlock(&rbd_client_list_lock);

	return found ? client_node : NULL;
}
/*
 * (Per device) rbd map options
 */
enum {
	Opt_queue_depth,
	Opt_alloc_size,
	Opt_lock_timeout,
	Opt_last_int,
	/* int args above */
	Opt_pool_ns,
	Opt_last_string,
	/* string args above */
	Opt_read_only,
	Opt_read_write,
	Opt_lock_on_read,
	Opt_exclusive,
	Opt_notrim,
	Opt_err
};

static match_table_t rbd_opts_tokens = {
	{Opt_queue_depth, "queue_depth=%d"},
	{Opt_alloc_size, "alloc_size=%d"},
	{Opt_lock_timeout, "lock_timeout=%d"},
	/* int args above */
	{Opt_pool_ns, "_pool_ns=%s"},
	/* string args above */
	{Opt_read_only, "read_only"},
	{Opt_read_only, "ro"},		/* Alternate spelling */
	{Opt_read_write, "read_write"},
	{Opt_read_write, "rw"},		/* Alternate spelling */
	{Opt_lock_on_read, "lock_on_read"},
	{Opt_exclusive, "exclusive"},
	{Opt_notrim, "notrim"},
	{Opt_err, NULL}
};
struct rbd_options {
	int	queue_depth;
	int	alloc_size;
	unsigned long	lock_timeout;
	bool	read_only;
	bool	lock_on_read;
	bool	exclusive;
	bool	trim;
};

#define RBD_QUEUE_DEPTH_DEFAULT	BLKDEV_MAX_RQ
#define RBD_ALLOC_SIZE_DEFAULT	(64 * 1024)
#define RBD_LOCK_TIMEOUT_DEFAULT 0  /* no timeout */
#define RBD_READ_ONLY_DEFAULT	false
#define RBD_LOCK_ON_READ_DEFAULT false
#define RBD_EXCLUSIVE_DEFAULT	false
#define RBD_TRIM_DEFAULT	true
struct parse_rbd_opts_ctx {
	struct rbd_spec		*spec;
	struct rbd_options	*opts;
};
static int parse_rbd_opts_token(char *c, void *private)
{
	struct parse_rbd_opts_ctx *pctx = private;
	substring_t argstr[MAX_OPT_ARGS];
	int token, intval, ret;

	token = match_token(c, rbd_opts_tokens, argstr);
	if (token < Opt_last_int) {
		ret = match_int(&argstr[0], &intval);
		if (ret < 0) {
			pr_err("bad option arg (not int) at '%s'\n", c);
			return ret;
		}
		dout("got int token %d val %d\n", token, intval);
	} else if (token > Opt_last_int && token < Opt_last_string) {
		dout("got string token %d val %s\n", token, argstr[0].from);
	} else {
		dout("got token %d\n", token);
	}

	switch (token) {
	case Opt_queue_depth:
		if (intval < 1) {
			pr_err("queue_depth out of range\n");
			return -EINVAL;
		}
		pctx->opts->queue_depth = intval;
		break;
	case Opt_alloc_size:
		if (intval < SECTOR_SIZE) {
			pr_err("alloc_size out of range\n");
			return -EINVAL;
		}
		if (!is_power_of_2(intval)) {
			pr_err("alloc_size must be a power of 2\n");
			return -EINVAL;
		}
		pctx->opts->alloc_size = intval;
		break;
	case Opt_lock_timeout:
		/* 0 is "wait forever" (i.e. infinite timeout) */
		if (intval < 0 || intval > INT_MAX / 1000) {
			pr_err("lock_timeout out of range\n");
			return -EINVAL;
		}
		pctx->opts->lock_timeout = msecs_to_jiffies(intval * 1000);
		break;
	case Opt_pool_ns:
		kfree(pctx->spec->pool_ns);
		pctx->spec->pool_ns = match_strdup(argstr);
		if (!pctx->spec->pool_ns)
			return -ENOMEM;
		break;
	case Opt_read_only:
		pctx->opts->read_only = true;
		break;
	case Opt_read_write:
		pctx->opts->read_only = false;
		break;
	case Opt_lock_on_read:
		pctx->opts->lock_on_read = true;
		break;
	case Opt_exclusive:
		pctx->opts->exclusive = true;
		break;
	case Opt_notrim:
		pctx->opts->trim = false;
		break;
	default:
		/* libceph prints "bad option" msg */
		return -EINVAL;
	}

	return 0;
}
static char *obj_op_name(enum obj_operation_type op_type)
{
	switch (op_type) {
	case OBJ_OP_READ:
		return "read";
	case OBJ_OP_WRITE:
		return "write";
	case OBJ_OP_DISCARD:
		return "discard";
	case OBJ_OP_ZEROOUT:
		return "zeroout";
	default:
		return "???";
	}
}
/*
 * Destroy ceph client.  Takes and releases rbd_client_list_lock
 * itself, so the caller must not hold it.
 */
static void rbd_client_release(struct kref *kref)
{
	struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);

	dout("%s: rbdc %p\n", __func__, rbdc);
	spin_lock(&rbd_client_list_lock);
	list_del(&rbdc->node);
	spin_unlock(&rbd_client_list_lock);

	ceph_destroy_client(rbdc->client);
	kfree(rbdc);
}

/*
 * Drop reference to ceph client node. If it's not referenced anymore, release
 * it.
 */
static void rbd_put_client(struct rbd_client *rbdc)
{
	if (rbdc)
		kref_put(&rbdc->kref, rbd_client_release);
}
/*
 * Get a ceph client with specific addr and configuration, if one does
 * not exist create it.  Either way, ceph_opts is consumed by this
 * function.
 */
static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;
	int ret;

	mutex_lock(&client_mutex);
	rbdc = rbd_client_find(ceph_opts);
	if (rbdc) {
		ceph_destroy_options(ceph_opts);

		/*
		 * Using an existing client.  Make sure ->pg_pools is up to
		 * date before we look up the pool id in do_rbd_add().
		 */
		ret = ceph_wait_for_latest_osdmap(rbdc->client,
					rbdc->client->options->mount_timeout);
		if (ret) {
			rbd_warn(NULL, "failed to get latest osdmap: %d", ret);
			rbd_put_client(rbdc);
			rbdc = ERR_PTR(ret);
		}
	} else {
		rbdc = rbd_client_create(ceph_opts);
	}
	mutex_unlock(&client_mutex);

	return rbdc;
}
static bool rbd_image_format_valid(u32 image_format)
{
	return image_format == 1 || image_format == 2;
}
static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
{
	u32 snap_count;
	size_t size;

	/* The header has to start with the magic rbd header text */
	if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
		return false;

	/* The bio layer requires at least sector-sized I/O */

	if (ondisk->options.order < SECTOR_SHIFT)
		return false;

	/* If we use u64 in a few spots we may be able to loosen this */

	if (ondisk->options.order > 8 * sizeof (int) - 1)
		return false;

	/*
	 * The size of a snapshot header has to fit in a size_t, and
	 * that limits the number of snapshots.
	 */
	snap_count = le32_to_cpu(ondisk->snap_count);
	size = SIZE_MAX - sizeof (struct ceph_snap_context);
	if (snap_count > size / sizeof (__le64))
		return false;

	/*
	 * Not only that, but the size of the entire snapshot
	 * header must also be representable in a size_t.
	 */
	size -= snap_count * sizeof (__le64);
	if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
		return false;

	return true;
}
/*
 * returns the size of an object in the image
 */
static u32 rbd_obj_bytes(struct rbd_image_header *header)
{
	return 1U << header->obj_order;
}
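/* e.g. the common default order of 22 gives 1U << 22 == 4 MiB objects */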
static void rbd_init_layout(struct rbd_device *rbd_dev)
{
	if (rbd_dev->header.stripe_unit == 0 ||
	    rbd_dev->header.stripe_count == 0) {
		rbd_dev->header.stripe_unit = rbd_obj_bytes(&rbd_dev->header);
		rbd_dev->header.stripe_count = 1;
	}

	rbd_dev->layout.stripe_unit = rbd_dev->header.stripe_unit;
	rbd_dev->layout.stripe_count = rbd_dev->header.stripe_count;
	rbd_dev->layout.object_size = rbd_obj_bytes(&rbd_dev->header);
	rbd_dev->layout.pool_id = rbd_dev->header.data_pool_id == CEPH_NOPOOL ?
			  rbd_dev->spec->pool_id : rbd_dev->header.data_pool_id;
	RCU_INIT_POINTER(rbd_dev->layout.pool_ns, NULL);
}
/*
 * Fill an rbd image header with information from the given format 1
 * on-disk header.
 */
static int rbd_header_from_disk(struct rbd_device *rbd_dev,
				 struct rbd_image_header_ondisk *ondisk)
{
	struct rbd_image_header *header = &rbd_dev->header;
	bool first_time = header->object_prefix == NULL;
	struct ceph_snap_context *snapc;
	char *object_prefix = NULL;
	char *snap_names = NULL;
	u64 *snap_sizes = NULL;
	u32 snap_count;
	int ret = -ENOMEM;
	u32 i;

	/* Allocate this now to avoid having to handle failure below */

	if (first_time) {
		object_prefix = kstrndup(ondisk->object_prefix,
					 sizeof(ondisk->object_prefix),
					 GFP_KERNEL);
		if (!object_prefix)
			return -ENOMEM;
	}

	/* Allocate the snapshot context and fill it in */

	snap_count = le32_to_cpu(ondisk->snap_count);
	snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
	if (!snapc)
		goto out_err;
	snapc->seq = le64_to_cpu(ondisk->snap_seq);
	if (snap_count) {
		struct rbd_image_snap_ondisk *snaps;
		u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);

		/* We'll keep a copy of the snapshot names... */

		if (snap_names_len > (u64)SIZE_MAX)
			goto out_2big;
		snap_names = kmalloc(snap_names_len, GFP_KERNEL);
		if (!snap_names)
			goto out_err;

		/* ...as well as the array of their sizes. */
		snap_sizes = kmalloc_array(snap_count,
					   sizeof(*header->snap_sizes),
					   GFP_KERNEL);
		if (!snap_sizes)
			goto out_err;

		/*
		 * Copy the names, and fill in each snapshot's id
		 * and size.
		 *
		 * Note that rbd_dev_v1_header_info() guarantees the
		 * ondisk buffer we're working with has
		 * snap_names_len bytes beyond the end of the
		 * snapshot id array, this memcpy() is safe.
		 */
		memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
		snaps = ondisk->snaps;
		for (i = 0; i < snap_count; i++) {
			snapc->snaps[i] = le64_to_cpu(snaps[i].id);
			snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
		}
	}

	/* We won't fail any more, fill in the header */

	if (first_time) {
		header->object_prefix = object_prefix;
		header->obj_order = ondisk->options.order;
		rbd_init_layout(rbd_dev);
	} else {
		ceph_put_snap_context(header->snapc);
		kfree(header->snap_names);
		kfree(header->snap_sizes);
	}

	/* The remaining fields always get updated (when we refresh) */

	header->image_size = le64_to_cpu(ondisk->image_size);
	header->snapc = snapc;
	header->snap_names = snap_names;
	header->snap_sizes = snap_sizes;

	return 0;
out_2big:
	ret = -EIO;
out_err:
	kfree(snap_sizes);
	kfree(snap_names);
	ceph_put_snap_context(snapc);
	kfree(object_prefix);

	return ret;
}
static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
{
	const char *snap_name;

	rbd_assert(which < rbd_dev->header.snapc->num_snaps);

	/* Skip over names until we find the one we are looking for */

	snap_name = rbd_dev->header.snap_names;
	while (which--)
		snap_name += strlen(snap_name) + 1;

	return kstrdup(snap_name, GFP_KERNEL);
}
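/*
 * E.g. with snap_names = "mon\0tue\0wed\0", which == 1 skips past
 * "mon" (strlen + 1 bytes) and returns a copy of "tue".
 */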
/*
 * Snapshot id comparison function for use with qsort()/bsearch().
 * Note that result is for snapshots in *descending* order.
 */
static int snapid_compare_reverse(const void *s1, const void *s2)
{
	u64 snap_id1 = *(u64 *)s1;
	u64 snap_id2 = *(u64 *)s2;

	if (snap_id1 < snap_id2)
		return 1;
	return snap_id1 == snap_id2 ? 0 : -1;
}
/*
 * Search a snapshot context to see if the given snapshot id is
 * present.
 *
 * Returns the position of the snapshot id in the array if it's found,
 * or BAD_SNAP_INDEX otherwise.
 *
 * Note: The snapshot array is kept sorted (by the osd) in
 * reverse order, highest snapshot id first.
 */
static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	u64 *found;

	found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
				sizeof (snap_id), snapid_compare_reverse);

	return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
}
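/*
 * E.g. for snapc->snaps = { 12, 7, 3 } (descending), snap_id 7 yields
 * index 1, while snap_id 5 is not present and yields BAD_SNAP_INDEX.
 */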
static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id)
{
	u32 which;
	const char *snap_name;

	which = rbd_dev_snap_index(rbd_dev, snap_id);
	if (which == BAD_SNAP_INDEX)
		return ERR_PTR(-ENOENT);

	snap_name = _rbd_dev_v1_snap_name(rbd_dev, which);
	return snap_name ? snap_name : ERR_PTR(-ENOMEM);
}
static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
{
	if (snap_id == CEPH_NOSNAP)
		return RBD_SNAP_HEAD_NAME;

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (rbd_dev->image_format == 1)
		return rbd_dev_v1_snap_name(rbd_dev, snap_id);

	return rbd_dev_v2_snap_name(rbd_dev, snap_id);
}
static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u64 *snap_size)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_size = rbd_dev->header.image_size;
	} else if (rbd_dev->image_format == 1) {
		u32 which;

		which = rbd_dev_snap_index(rbd_dev, snap_id);
		if (which == BAD_SNAP_INDEX)
			return -ENOENT;

		*snap_size = rbd_dev->header.snap_sizes[which];
	} else {
		u64 size = 0;
		int ret;

		ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
		if (ret)
			return ret;

		*snap_size = size;
	}
	return 0;
}
static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
			u64 *snap_features)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_features = rbd_dev->header.features;
	} else if (rbd_dev->image_format == 1) {
		*snap_features = 0;	/* No features for format 1 */
	} else {
		u64 features = 0;
		int ret;

		ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, &features);
		if (ret)
			return ret;

		*snap_features = features;
	}
	return 0;
}
static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
{
	u64 snap_id = rbd_dev->spec->snap_id;
	u64 size = 0;
	u64 features = 0;
	int ret;

	ret = rbd_snap_size(rbd_dev, snap_id, &size);
	if (ret)
		return ret;
	ret = rbd_snap_features(rbd_dev, snap_id, &features);
	if (ret)
		return ret;

	rbd_dev->mapping.size = size;
	rbd_dev->mapping.features = features;

	return 0;
}

static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
{
	rbd_dev->mapping.size = 0;
	rbd_dev->mapping.features = 0;
}
static void zero_bvec(struct bio_vec *bv)
{
	void *buf;
	unsigned long flags;

	buf = bvec_kmap_irq(bv, &flags);
	memset(buf, 0, bv->bv_len);
	flush_dcache_page(bv->bv_page);
	bvec_kunmap_irq(buf, &flags);
}
static void zero_bios(struct ceph_bio_iter *bio_pos, u32 off, u32 bytes)
{
	struct ceph_bio_iter it = *bio_pos;

	ceph_bio_iter_advance(&it, off);
	ceph_bio_iter_advance_step(&it, bytes, ({
		zero_bvec(&bv);
	}));
}

static void zero_bvecs(struct ceph_bvec_iter *bvec_pos, u32 off, u32 bytes)
{
	struct ceph_bvec_iter it = *bvec_pos;

	ceph_bvec_iter_advance(&it, off);
	ceph_bvec_iter_advance_step(&it, bytes, ({
		zero_bvec(&bv);
	}));
}
/*
 * Zero a range in @obj_req data buffer defined by a bio (list) or
 * (private) bio_vec array.
 *
 * @off is relative to the start of the data buffer.
 */
static void rbd_obj_zero_range(struct rbd_obj_request *obj_req, u32 off,
			       u32 bytes)
{
	dout("%s %p data buf %u~%u\n", __func__, obj_req, off, bytes);

	switch (obj_req->img_request->data_type) {
	case OBJ_REQUEST_BIO:
		zero_bios(&obj_req->bio_pos, off, bytes);
		break;
	case OBJ_REQUEST_BVECS:
	case OBJ_REQUEST_OWN_BVECS:
		zero_bvecs(&obj_req->bvec_pos, off, bytes);
		break;
	default:
		BUG();
	}
}
static void rbd_obj_request_destroy(struct kref *kref);
static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request != NULL);
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
	     kref_read(&obj_request->kref));
	kref_put(&obj_request->kref, rbd_obj_request_destroy);
}

static void rbd_img_request_destroy(struct kref *kref);
static void rbd_img_request_put(struct rbd_img_request *img_request)
{
	rbd_assert(img_request != NULL);
	dout("%s: img %p (was %d)\n", __func__, img_request,
	     kref_read(&img_request->kref));
	kref_put(&img_request->kref, rbd_img_request_destroy);
}
static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->img_request == NULL);

	/* Image request now owns object's original reference */
	obj_request->img_request = img_request;
	dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
}

static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
	list_del(&obj_request->ex.oe_item);
	rbd_assert(obj_request->img_request == img_request);
	rbd_obj_request_put(obj_request);
}
static void rbd_osd_submit(struct ceph_osd_request *osd_req)
{
	struct rbd_obj_request *obj_req = osd_req->r_priv;

	dout("%s osd_req %p for obj_req %p objno %llu %llu~%llu\n",
	     __func__, osd_req, obj_req, obj_req->ex.oe_objno,
	     obj_req->ex.oe_off, obj_req->ex.oe_len);
	ceph_osdc_start_request(osd_req->r_osdc, osd_req, false);
}
/*
 * The default/initial value for all image request flags is 0.  Each
 * is conditionally set to 1 at image request initialization time
 * and currently never changes thereafter.
 */
static void img_request_layered_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

static void img_request_layered_clear(struct rbd_img_request *img_request)
{
	clear_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

static bool img_request_layered_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
}
static bool rbd_obj_is_entire(struct rbd_obj_request *obj_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;

	return !obj_req->ex.oe_off &&
	       obj_req->ex.oe_len == rbd_dev->layout.object_size;
}

static bool rbd_obj_is_tail(struct rbd_obj_request *obj_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;

	return obj_req->ex.oe_off + obj_req->ex.oe_len ==
					rbd_dev->layout.object_size;
}
/*
 * Must be called after rbd_obj_calc_img_extents().
 */
static bool rbd_obj_copyup_enabled(struct rbd_obj_request *obj_req)
{
	if (!obj_req->num_img_extents ||
	    (rbd_obj_is_entire(obj_req) &&
	     !obj_req->img_request->snapc->num_snaps))
		return false;

	return true;
}
static u64 rbd_obj_img_extents_bytes(struct rbd_obj_request *obj_req)
{
	return ceph_file_extents_bytes(obj_req->img_extents,
				       obj_req->num_img_extents);
}
static bool rbd_img_is_write(struct rbd_img_request *img_req)
{
	switch (img_req->op_type) {
	case OBJ_OP_READ:
		return false;
	case OBJ_OP_WRITE:
	case OBJ_OP_DISCARD:
	case OBJ_OP_ZEROOUT:
		return true;
	default:
		BUG();
	}
}
static void rbd_osd_req_callback(struct ceph_osd_request *osd_req)
{
	struct rbd_obj_request *obj_req = osd_req->r_priv;
	int result;

	dout("%s osd_req %p result %d for obj_req %p\n", __func__, osd_req,
	     osd_req->r_result, obj_req);

	/*
	 * Writes aren't allowed to return a data payload.  In some
	 * guarded write cases (e.g. stat + zero on an empty object)
	 * a stat response makes it through, but we don't care.
	 */
	if (osd_req->r_result > 0 && rbd_img_is_write(obj_req->img_request))
		result = 0;
	else
		result = osd_req->r_result;

	rbd_obj_handle_request(obj_req, result);
}
static void rbd_osd_format_read(struct ceph_osd_request *osd_req)
{
	struct rbd_obj_request *obj_request = osd_req->r_priv;

	osd_req->r_flags = CEPH_OSD_FLAG_READ;
	osd_req->r_snapid = obj_request->img_request->snap_id;
}

static void rbd_osd_format_write(struct ceph_osd_request *osd_req)
{
	struct rbd_obj_request *obj_request = osd_req->r_priv;

	osd_req->r_flags = CEPH_OSD_FLAG_WRITE;
	ktime_get_real_ts64(&osd_req->r_mtime);
	osd_req->r_data_offset = obj_request->ex.oe_off;
}
static struct ceph_osd_request *
__rbd_obj_add_osd_request(struct rbd_obj_request *obj_req,
			  struct ceph_snap_context *snapc, int num_ops)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct ceph_osd_request *req;
	const char *name_format = rbd_dev->image_format == 1 ?
				      RBD_V1_DATA_FORMAT : RBD_V2_DATA_FORMAT;
	int ret;

	req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false, GFP_NOIO);
	if (!req)
		return ERR_PTR(-ENOMEM);

	list_add_tail(&req->r_private_item, &obj_req->osd_reqs);
	req->r_callback = rbd_osd_req_callback;
	req->r_priv = obj_req;

	/*
	 * Data objects may be stored in a separate pool, but always in
	 * the same namespace in that pool as the header in its pool.
	 */
	ceph_oloc_copy(&req->r_base_oloc, &rbd_dev->header_oloc);
	req->r_base_oloc.pool = rbd_dev->layout.pool_id;

	ret = ceph_oid_aprintf(&req->r_base_oid, GFP_NOIO, name_format,
			       rbd_dev->header.object_prefix,
			       obj_req->ex.oe_objno);
	if (ret)
		return ERR_PTR(ret);

	return req;
}
static struct ceph_osd_request *
rbd_obj_add_osd_request(struct rbd_obj_request *obj_req, int num_ops)
{
	return __rbd_obj_add_osd_request(obj_req, obj_req->img_request->snapc,
					 num_ops);
}
static struct rbd_obj_request *rbd_obj_request_create(void)
{
	struct rbd_obj_request *obj_request;

	obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_NOIO);
	if (!obj_request)
		return NULL;

	ceph_object_extent_init(&obj_request->ex);
	INIT_LIST_HEAD(&obj_request->osd_reqs);
	mutex_init(&obj_request->state_mutex);
	kref_init(&obj_request->kref);

	dout("%s %p\n", __func__, obj_request);
	return obj_request;
}
static void rbd_obj_request_destroy(struct kref *kref)
{
	struct rbd_obj_request *obj_request;
	struct ceph_osd_request *osd_req;
	u32 i;

	obj_request = container_of(kref, struct rbd_obj_request, kref);

	dout("%s: obj %p\n", __func__, obj_request);

	while (!list_empty(&obj_request->osd_reqs)) {
		osd_req = list_first_entry(&obj_request->osd_reqs,
				    struct ceph_osd_request, r_private_item);
		list_del_init(&osd_req->r_private_item);
		ceph_osdc_put_request(osd_req);
	}

	switch (obj_request->img_request->data_type) {
	case OBJ_REQUEST_NODATA:
	case OBJ_REQUEST_BIO:
	case OBJ_REQUEST_BVECS:
		break;		/* Nothing to do */
	case OBJ_REQUEST_OWN_BVECS:
		kfree(obj_request->bvec_pos.bvecs);
		break;
	default:
		BUG();
	}

	kfree(obj_request->img_extents);
	if (obj_request->copyup_bvecs) {
		for (i = 0; i < obj_request->copyup_bvec_count; i++) {
			if (obj_request->copyup_bvecs[i].bv_page)
				__free_page(obj_request->copyup_bvecs[i].bv_page);
		}
		kfree(obj_request->copyup_bvecs);
	}

	kmem_cache_free(rbd_obj_request_cache, obj_request);
}
/* It's OK to call this for a device with no parent */

static void rbd_spec_put(struct rbd_spec *spec);
static void rbd_dev_unparent(struct rbd_device *rbd_dev)
{
	rbd_dev_remove_parent(rbd_dev);
	rbd_spec_put(rbd_dev->parent_spec);
	rbd_dev->parent_spec = NULL;
	rbd_dev->parent_overlap = 0;
}
/*
 * Parent image reference counting is used to determine when an
 * image's parent fields can be safely torn down--after there are no
 * more in-flight requests to the parent image.  When the last
 * reference is dropped, cleaning them up is safe.
 */
static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
{
	int counter;

	if (!rbd_dev->parent_spec)
		return;

	counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
	if (counter > 0)
		return;

	/* Last reference; clean up parent data structures */

	if (!counter)
		rbd_dev_unparent(rbd_dev);
	else
		rbd_warn(rbd_dev, "parent reference underflow");
}
/*
 * If an image has a non-zero parent overlap, get a reference to its
 * parent.
 *
 * Returns true if the rbd device has a parent with a non-zero
 * overlap and a reference for it was successfully taken, or
 * false otherwise.
 */
static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
{
	int counter = 0;

	if (!rbd_dev->parent_spec)
		return false;

	down_read(&rbd_dev->header_rwsem);
	if (rbd_dev->parent_overlap)
		counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
	up_read(&rbd_dev->header_rwsem);

	if (counter < 0)
		rbd_warn(rbd_dev, "parent reference overflow");

	return counter > 0;
}
/*
 * Caller is responsible for filling in the list of object requests
 * that comprises the image request, and the Linux request pointer
 * (if there is one).
 */
static struct rbd_img_request *rbd_img_request_create(
					struct rbd_device *rbd_dev,
					enum obj_operation_type op_type,
					struct ceph_snap_context *snapc)
{
	struct rbd_img_request *img_request;

	img_request = kmem_cache_zalloc(rbd_img_request_cache, GFP_NOIO);
	if (!img_request)
		return NULL;

	img_request->rbd_dev = rbd_dev;
	img_request->op_type = op_type;
	if (!rbd_img_is_write(img_request))
		img_request->snap_id = rbd_dev->spec->snap_id;
	else
		img_request->snapc = snapc;

	if (rbd_dev_parent_get(rbd_dev))
		img_request_layered_set(img_request);

	INIT_LIST_HEAD(&img_request->lock_item);
	INIT_LIST_HEAD(&img_request->object_extents);
	mutex_init(&img_request->state_mutex);
	kref_init(&img_request->kref);

	dout("%s: rbd_dev %p %s -> img %p\n", __func__, rbd_dev,
	     obj_op_name(op_type), img_request);
	return img_request;
}
static void rbd_img_request_destroy(struct kref *kref)
{
	struct rbd_img_request *img_request;
	struct rbd_obj_request *obj_request;
	struct rbd_obj_request *next_obj_request;

	img_request = container_of(kref, struct rbd_img_request, kref);

	dout("%s: img %p\n", __func__, img_request);

	WARN_ON(!list_empty(&img_request->lock_item));
	for_each_obj_request_safe(img_request, obj_request, next_obj_request)
		rbd_img_obj_request_del(img_request, obj_request);

	if (img_request_layered_test(img_request)) {
		img_request_layered_clear(img_request);
		rbd_dev_parent_put(img_request->rbd_dev);
	}

	if (rbd_img_is_write(img_request))
		ceph_put_snap_context(img_request->snapc);

	kmem_cache_free(rbd_img_request_cache, img_request);
}
#define BITS_PER_OBJ	2
#define OBJS_PER_BYTE	(BITS_PER_BYTE / BITS_PER_OBJ)
#define OBJ_MASK	((1 << BITS_PER_OBJ) - 1)

static void __rbd_object_map_index(struct rbd_device *rbd_dev, u64 objno,
				   u64 *index, u8 *shift)
{
	u32 off;

	rbd_assert(objno < rbd_dev->object_map_size);
	*index = div_u64_rem(objno, OBJS_PER_BYTE, &off);
	*shift = (OBJS_PER_BYTE - off - 1) * BITS_PER_OBJ;
}
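/*
 * E.g. objno 5: index = 5 / OBJS_PER_BYTE = 1 with off = 1, so
 * shift = (4 - 1 - 1) * 2 = 4 -- the two state bits for object 5
 * live in bits 5:4 of object_map[1].
 */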
static u8 __rbd_object_map_get(struct rbd_device *rbd_dev, u64 objno)
{
	u64 index;
	u8 shift;

	lockdep_assert_held(&rbd_dev->object_map_lock);
	__rbd_object_map_index(rbd_dev, objno, &index, &shift);
	return (rbd_dev->object_map[index] >> shift) & OBJ_MASK;
}
static void __rbd_object_map_set(struct rbd_device *rbd_dev, u64 objno, u8 val)
{
	u64 index;
	u8 shift;
	u8 *p;

	lockdep_assert_held(&rbd_dev->object_map_lock);
	rbd_assert(!(val & ~OBJ_MASK));

	__rbd_object_map_index(rbd_dev, objno, &index, &shift);
	p = &rbd_dev->object_map[index];
	*p = (*p & ~(OBJ_MASK << shift)) | (val << shift);
}
static u8 rbd_object_map_get(struct rbd_device *rbd_dev, u64 objno)
{
	u8 state;

	spin_lock(&rbd_dev->object_map_lock);
	state = __rbd_object_map_get(rbd_dev, objno);
	spin_unlock(&rbd_dev->object_map_lock);
	return state;
}
static bool use_object_map(struct rbd_device *rbd_dev)
{
	return ((rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP) &&
		!(rbd_dev->object_map_flags & RBD_FLAG_OBJECT_MAP_INVALID));
}
static bool rbd_object_map_may_exist(struct rbd_device *rbd_dev, u64 objno)
{
	u8 state;

	/* fall back to default logic if object map is disabled or invalid */
	if (!use_object_map(rbd_dev))
		return true;

	state = rbd_object_map_get(rbd_dev, objno);
	return state != OBJECT_NONEXISTENT;
}
static void rbd_object_map_name(struct rbd_device *rbd_dev, u64 snap_id,
				struct ceph_object_id *oid)
{
	if (snap_id == CEPH_NOSNAP)
		ceph_oid_printf(oid, "%s%s", RBD_OBJECT_MAP_PREFIX,
				rbd_dev->spec->image_id);
	else
		ceph_oid_printf(oid, "%s%s.%016llx", RBD_OBJECT_MAP_PREFIX,
				rbd_dev->spec->image_id, snap_id);
}
static int rbd_object_map_lock(struct rbd_device *rbd_dev)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	CEPH_DEFINE_OID_ONSTACK(oid);
	u8 lock_type;
	char *lock_tag;
	struct ceph_locker *lockers;
	u32 num_lockers;
	bool broke_lock = false;
	int ret;

	rbd_object_map_name(rbd_dev, CEPH_NOSNAP, &oid);

again:
	ret = ceph_cls_lock(osdc, &oid, &rbd_dev->header_oloc, RBD_LOCK_NAME,
			    CEPH_CLS_LOCK_EXCLUSIVE, "", "", "", 0);
	if (ret != -EBUSY || broke_lock) {
		if (ret == -EEXIST)
			ret = 0; /* already locked by myself */
		if (ret)
			rbd_warn(rbd_dev, "failed to lock object map: %d", ret);
		return ret;
	}

	ret = ceph_cls_lock_info(osdc, &oid, &rbd_dev->header_oloc,
				 RBD_LOCK_NAME, &lock_type, &lock_tag,
				 &lockers, &num_lockers);
	if (ret) {
		if (ret == -ENOENT)
			goto again;

		rbd_warn(rbd_dev, "failed to get object map lockers: %d", ret);
		return ret;
	}

	kfree(lock_tag);	/* not used */
	if (num_lockers == 0)
		goto again;

	rbd_warn(rbd_dev, "breaking object map lock owned by %s%llu",
		 ENTITY_NAME(lockers[0].id.name));

	ret = ceph_cls_break_lock(osdc, &oid, &rbd_dev->header_oloc,
				  RBD_LOCK_NAME, lockers[0].id.cookie,
				  &lockers[0].id.name);
	ceph_free_lockers(lockers, num_lockers);
	if (ret) {
		if (ret == -ENOENT)
			goto again;

		rbd_warn(rbd_dev, "failed to break object map lock: %d", ret);
		return ret;
	}

	broke_lock = true;
	goto again;
}
static void rbd_object_map_unlock(struct rbd_device *rbd_dev)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	CEPH_DEFINE_OID_ONSTACK(oid);
	int ret;

	rbd_object_map_name(rbd_dev, CEPH_NOSNAP, &oid);

	ret = ceph_cls_unlock(osdc, &oid, &rbd_dev->header_oloc, RBD_LOCK_NAME,
			      "");
	if (ret && ret != -ENOENT)
		rbd_warn(rbd_dev, "failed to unlock object map: %d", ret);
}
static int decode_object_map_header(void **p, void *end, u64 *object_map_size)
{
	u8 struct_v;
	u32 struct_len;
	u32 header_len;
	void *header_end;
	int ret;

	ceph_decode_32_safe(p, end, header_len, e_inval);
	header_end = *p + header_len;

	ret = ceph_start_decoding(p, end, 1, "BitVector header", &struct_v,
				  &struct_len);
	if (ret)
		return ret;

	ceph_decode_64_safe(p, end, *object_map_size, e_inval);

	*p = header_end;
	return 0;

e_inval:
	return -EINVAL;
}
static int __rbd_object_map_load(struct rbd_device *rbd_dev)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	CEPH_DEFINE_OID_ONSTACK(oid);
	struct page **pages;
	void *p, *end;
	size_t reply_len;
	u64 num_objects;
	u64 object_map_bytes;
	u64 object_map_size;
	int num_pages;
	int ret;

	rbd_assert(!rbd_dev->object_map && !rbd_dev->object_map_size);

	num_objects = ceph_get_num_objects(&rbd_dev->layout,
					   rbd_dev->mapping.size);
	object_map_bytes = DIV_ROUND_UP_ULL(num_objects * BITS_PER_OBJ,
					    BITS_PER_BYTE);
	num_pages = calc_pages_for(0, object_map_bytes) + 1;
	pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	reply_len = num_pages * PAGE_SIZE;
	rbd_object_map_name(rbd_dev, rbd_dev->spec->snap_id, &oid);
	ret = ceph_osdc_call(osdc, &oid, &rbd_dev->header_oloc,
			     "rbd", "object_map_load", CEPH_OSD_FLAG_READ,
			     NULL, 0, pages, &reply_len);
	if (ret)
		goto out;

	p = page_address(pages[0]);
	end = p + min(reply_len, (size_t)PAGE_SIZE);
	ret = decode_object_map_header(&p, end, &object_map_size);
	if (ret)
		goto out;

	if (object_map_size != num_objects) {
		rbd_warn(rbd_dev, "object map size mismatch: %llu vs %llu",
			 object_map_size, num_objects);
		ret = -EINVAL;
		goto out;
	}

	if (offset_in_page(p) + object_map_bytes > reply_len) {
		ret = -EINVAL;
		goto out;
	}

	rbd_dev->object_map = kvmalloc(object_map_bytes, GFP_KERNEL);
	if (!rbd_dev->object_map) {
		ret = -ENOMEM;
		goto out;
	}

	rbd_dev->object_map_size = object_map_size;
	ceph_copy_from_page_vector(pages, rbd_dev->object_map,
				   offset_in_page(p), object_map_bytes);

out:
	ceph_release_page_vector(pages, num_pages);
	return ret;
}
static void rbd_object_map_free(struct rbd_device *rbd_dev)
{
	kvfree(rbd_dev->object_map);
	rbd_dev->object_map = NULL;
	rbd_dev->object_map_size = 0;
}
static int rbd_object_map_load(struct rbd_device *rbd_dev)
{
	int ret;

	ret = __rbd_object_map_load(rbd_dev);
	if (ret)
		return ret;

	ret = rbd_dev_v2_get_flags(rbd_dev);
	if (ret) {
		rbd_object_map_free(rbd_dev);
		return ret;
	}

	if (rbd_dev->object_map_flags & RBD_FLAG_OBJECT_MAP_INVALID)
		rbd_warn(rbd_dev, "object map is invalid");

	return 0;
}
static int rbd_object_map_open(struct rbd_device *rbd_dev)
{
	int ret;

	ret = rbd_object_map_lock(rbd_dev);
	if (ret)
		return ret;

	ret = rbd_object_map_load(rbd_dev);
	if (ret) {
		rbd_object_map_unlock(rbd_dev);
		return ret;
	}

	return 0;
}
static void rbd_object_map_close(struct rbd_device *rbd_dev)
{
	rbd_object_map_free(rbd_dev);
	rbd_object_map_unlock(rbd_dev);
}
/*
 * This function needs snap_id (or more precisely just something to
 * distinguish between HEAD and snapshot object maps), new_state and
 * current_state that were passed to rbd_object_map_update().
 *
 * To avoid allocating and stashing a context we piggyback on the OSD
 * request.  A HEAD update has two ops (assert_locked).  For new_state
 * and current_state we decode our own object_map_update op, encoded in
 * rbd_cls_object_map_update().
 */
static int rbd_object_map_update_finish(struct rbd_obj_request *obj_req,
					struct ceph_osd_request *osd_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	struct ceph_osd_data *osd_data;
	u64 objno;
	u8 state, new_state, current_state;
	bool has_current_state;
	void *p;

	if (osd_req->r_result)
		return osd_req->r_result;

	/*
	 * Nothing to do for a snapshot object map.
	 */
	if (osd_req->r_num_ops == 1)
		return 0;

	/*
	 * Update in-memory HEAD object map.
	 */
	rbd_assert(osd_req->r_num_ops == 2);
	osd_data = osd_req_op_data(osd_req, 1, cls, request_data);
	rbd_assert(osd_data->type == CEPH_OSD_DATA_TYPE_PAGES);

	p = page_address(osd_data->pages[0]);
	objno = ceph_decode_64(&p);
	rbd_assert(objno == obj_req->ex.oe_objno);
	rbd_assert(ceph_decode_64(&p) == objno + 1);
	new_state = ceph_decode_8(&p);
	has_current_state = ceph_decode_8(&p);
	if (has_current_state)
		current_state = ceph_decode_8(&p);

	spin_lock(&rbd_dev->object_map_lock);
	state = __rbd_object_map_get(rbd_dev, objno);
	if (!has_current_state || current_state == state ||
	    (current_state == OBJECT_EXISTS && state == OBJECT_EXISTS_CLEAN))
		__rbd_object_map_set(rbd_dev, objno, new_state);
	spin_unlock(&rbd_dev->object_map_lock);

	return 0;
}
static void rbd_object_map_callback(struct ceph_osd_request *osd_req)
{
	struct rbd_obj_request *obj_req = osd_req->r_priv;
	int result;

	dout("%s osd_req %p result %d for obj_req %p\n", __func__, osd_req,
	     osd_req->r_result, obj_req);

	result = rbd_object_map_update_finish(obj_req, osd_req);
	rbd_obj_handle_request(obj_req, result);
}
static bool update_needed(struct rbd_device *rbd_dev, u64 objno, u8 new_state)
{
	u8 state = rbd_object_map_get(rbd_dev, objno);

	if (state == new_state ||
	    (new_state == OBJECT_PENDING && state == OBJECT_NONEXISTENT) ||
	    (new_state == OBJECT_NONEXISTENT && state != OBJECT_PENDING))
		return false;

	return true;
}
static int rbd_cls_object_map_update(struct ceph_osd_request *req,
				     int which, u64 objno, u8 new_state,
				     const u8 *current_state)
{
	struct page **pages;
	void *p, *start;
	int ret;

	ret = osd_req_op_cls_init(req, which, "rbd", "object_map_update");
	if (ret)
		return ret;

	pages = ceph_alloc_page_vector(1, GFP_NOIO);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	p = start = page_address(pages[0]);
	ceph_encode_64(&p, objno);
	ceph_encode_64(&p, objno + 1);
	ceph_encode_8(&p, new_state);
	if (current_state) {
		ceph_encode_8(&p, 1);
		ceph_encode_8(&p, *current_state);
	} else {
		ceph_encode_8(&p, 0);
	}

	osd_req_op_cls_request_data_pages(req, which, pages, p - start, 0,
					  false, true);
	return 0;
}
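/*
 * The resulting payload is the half-open range [objno, objno + 1) as
 * two 64-bit values, the 8-bit new_state, and a presence byte
 * optionally followed by the 8-bit current_state -- 18 or 19 bytes.
 */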
/*
 * Return:
 *   0 - object map update sent
 *   1 - object map update isn't needed
 *  <0 - error occurred
 */
static int rbd_object_map_update(struct rbd_obj_request *obj_req, u64 snap_id,
				 u8 new_state, const u8 *current_state)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct ceph_osd_request *req;
	int num_ops = 1;
	int which = 0;
	int ret;

	if (snap_id == CEPH_NOSNAP) {
		if (!update_needed(rbd_dev, obj_req->ex.oe_objno, new_state))
			return 1;

		num_ops++; /* assert_locked */
	}

	req = ceph_osdc_alloc_request(osdc, NULL, num_ops, false, GFP_NOIO);
	if (!req)
		return -ENOMEM;

	list_add_tail(&req->r_private_item, &obj_req->osd_reqs);
	req->r_callback = rbd_object_map_callback;
	req->r_priv = obj_req;

	rbd_object_map_name(rbd_dev, snap_id, &req->r_base_oid);
	ceph_oloc_copy(&req->r_base_oloc, &rbd_dev->header_oloc);
	req->r_flags = CEPH_OSD_FLAG_WRITE;
	ktime_get_real_ts64(&req->r_mtime);

	if (snap_id == CEPH_NOSNAP) {
		/*
		 * Protect against possible race conditions during lock
		 * ownership transitions.
		 */
		ret = ceph_cls_assert_locked(req, which++, RBD_LOCK_NAME,
					     CEPH_CLS_LOCK_EXCLUSIVE, "", "");
		if (ret)
			return ret;
	}

	ret = rbd_cls_object_map_update(req, which, obj_req->ex.oe_objno,
					new_state, current_state);
	if (ret)
		return ret;

	ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
	if (ret)
		return ret;

	ceph_osdc_start_request(osdc, req, false);
	return 0;
}
static void prune_extents(struct ceph_file_extent *img_extents,
			  u32 *num_img_extents, u64 overlap)
{
	u32 cnt = *num_img_extents;

	/* drop extents completely beyond the overlap */
	while (cnt && img_extents[cnt - 1].fe_off >= overlap)
		cnt--;

	if (cnt) {
		struct ceph_file_extent *ex = &img_extents[cnt - 1];

		/* trim final overlapping extent */
		if (ex->fe_off + ex->fe_len > overlap)
			ex->fe_len = overlap - ex->fe_off;
	}

	*num_img_extents = cnt;
}
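/*
 * E.g. with img_extents = { 0~4M, 6M~2M } and a 7M parent overlap the
 * second extent is trimmed to 6M~1M; with a 5M overlap it is dropped
 * entirely and only 0~4M survives.
 */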
/*
 * Determine the byte range(s) covered by either just the object extent
 * or the entire object in the parent image.
 */
static int rbd_obj_calc_img_extents(struct rbd_obj_request *obj_req,
				    bool entire)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	int ret;

	if (!rbd_dev->parent_overlap)
		return 0;

	ret = ceph_extent_to_file(&rbd_dev->layout, obj_req->ex.oe_objno,
				  entire ? 0 : obj_req->ex.oe_off,
				  entire ? rbd_dev->layout.object_size :
							obj_req->ex.oe_len,
				  &obj_req->img_extents,
				  &obj_req->num_img_extents);
	if (ret)
		return ret;

	prune_extents(obj_req->img_extents, &obj_req->num_img_extents,
		      rbd_dev->parent_overlap);
	return 0;
}
static void rbd_osd_setup_data(struct ceph_osd_request *osd_req, int which)
{
	struct rbd_obj_request *obj_req = osd_req->r_priv;

	switch (obj_req->img_request->data_type) {
	case OBJ_REQUEST_BIO:
		osd_req_op_extent_osd_data_bio(osd_req, which,
					       &obj_req->bio_pos,
					       obj_req->ex.oe_len);
		break;
	case OBJ_REQUEST_BVECS:
	case OBJ_REQUEST_OWN_BVECS:
		rbd_assert(obj_req->bvec_pos.iter.bi_size ==
							obj_req->ex.oe_len);
		rbd_assert(obj_req->bvec_idx == obj_req->bvec_count);
		osd_req_op_extent_osd_data_bvec_pos(osd_req, which,
						    &obj_req->bvec_pos);
		break;
	default:
		BUG();
	}
}
static int rbd_osd_setup_stat(struct ceph_osd_request *osd_req, int which)
{
	struct page **pages;

	/*
	 * The response data for a STAT call consists of:
	 *     le64 length;
	 *     struct {
	 *         le32 tv_sec;
	 *         le32 tv_nsec;
	 *     } mtime;
	 */
	pages = ceph_alloc_page_vector(1, GFP_NOIO);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	osd_req_op_init(osd_req, which, CEPH_OSD_OP_STAT, 0);
	osd_req_op_raw_data_in_pages(osd_req, which, pages,
				     8 + sizeof(struct ceph_timespec),
				     0, false, true);
	return 0;
}
static int rbd_osd_setup_copyup(struct ceph_osd_request *osd_req, int which,
				u32 bytes)
{
	struct rbd_obj_request *obj_req = osd_req->r_priv;
	int ret;

	ret = osd_req_op_cls_init(osd_req, which, "rbd", "copyup");
	if (ret)
		return ret;

	osd_req_op_cls_request_data_bvecs(osd_req, which, obj_req->copyup_bvecs,
					  obj_req->copyup_bvec_count, bytes);
	return 0;
}
static int rbd_obj_init_read(struct rbd_obj_request *obj_req)
{
	obj_req->read_state = RBD_OBJ_READ_START;
	return 0;
}
static void __rbd_osd_setup_write_ops(struct ceph_osd_request *osd_req,
				      int which)
{
	struct rbd_obj_request *obj_req = osd_req->r_priv;
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	u16 opcode;

	if (!use_object_map(rbd_dev) ||
	    !(obj_req->flags & RBD_OBJ_FLAG_MAY_EXIST)) {
		osd_req_op_alloc_hint_init(osd_req, which++,
					   rbd_dev->layout.object_size,
					   rbd_dev->layout.object_size);
	}

	if (rbd_obj_is_entire(obj_req))
		opcode = CEPH_OSD_OP_WRITEFULL;
	else
		opcode = CEPH_OSD_OP_WRITE;

	osd_req_op_extent_init(osd_req, which, opcode,
			       obj_req->ex.oe_off, obj_req->ex.oe_len, 0, 0);
	rbd_osd_setup_data(osd_req, which);
}
static int rbd_obj_init_write(struct rbd_obj_request *obj_req)
{
	int ret;

	/* reverse map the entire object onto the parent */
	ret = rbd_obj_calc_img_extents(obj_req, true);
	if (ret)
		return ret;

	if (rbd_obj_copyup_enabled(obj_req))
		obj_req->flags |= RBD_OBJ_FLAG_COPYUP_ENABLED;

	obj_req->write_state = RBD_OBJ_WRITE_START;
	return 0;
}
static u16 truncate_or_zero_opcode(struct rbd_obj_request *obj_req)
{
	return rbd_obj_is_tail(obj_req) ? CEPH_OSD_OP_TRUNCATE :
					  CEPH_OSD_OP_ZERO;
}
static void __rbd_osd_setup_discard_ops(struct ceph_osd_request *osd_req,
					int which)
{
	struct rbd_obj_request *obj_req = osd_req->r_priv;

	if (rbd_obj_is_entire(obj_req) && !obj_req->num_img_extents) {
		rbd_assert(obj_req->flags & RBD_OBJ_FLAG_DELETION);
		osd_req_op_init(osd_req, which, CEPH_OSD_OP_DELETE, 0);
	} else {
		osd_req_op_extent_init(osd_req, which,
				       truncate_or_zero_opcode(obj_req),
				       obj_req->ex.oe_off, obj_req->ex.oe_len,
				       0, 0);
	}
}
2423 static int rbd_obj_init_discard(struct rbd_obj_request *obj_req)
2425 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2430 * Align the range to alloc_size boundary and punt on discards
2431 * that are too small to free up any space.
2433 * alloc_size == object_size && is_tail() is a special case for
2434 * filestore with filestore_punch_hole = false, needed to allow
2435 * truncate (in addition to delete).
2437 if (rbd_dev->opts->alloc_size != rbd_dev->layout.object_size ||
2438 !rbd_obj_is_tail(obj_req)) {
2439 off = round_up(obj_req->ex.oe_off, rbd_dev->opts->alloc_size);
2440 next_off = round_down(obj_req->ex.oe_off + obj_req->ex.oe_len,
2441 rbd_dev->opts->alloc_size);
2442 if (off >= next_off)
2443 return 1;
2445 dout("%s %p %llu~%llu -> %llu~%llu\n", __func__,
2446 obj_req, obj_req->ex.oe_off, obj_req->ex.oe_len,
2447 off, next_off - off);
2448 obj_req->ex.oe_off = off;
2449 obj_req->ex.oe_len = next_off - off;
2452 /* reverse map the entire object onto the parent */
2453 ret = rbd_obj_calc_img_extents(obj_req, true);
2454 if (ret)
2455 return ret;
2457 obj_req->flags |= RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT;
2458 if (rbd_obj_is_entire(obj_req) && !obj_req->num_img_extents)
2459 obj_req->flags |= RBD_OBJ_FLAG_DELETION;
2461 obj_req->write_state = RBD_OBJ_WRITE_START;
2462 return 0;
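/*
 * Worked example for the alignment above (assuming alloc_size = 64K):
 * a discard of 10240~204800 within an object becomes 65536~131072
 * (round_up(10240, 64K) = 64K, round_down(215040, 64K) = 192K).  A
 * discard of 10240~102400 rounds to an empty range (64K >= 64K), so it
 * is punted with ret = 1 and the object request is dropped in
 * __rbd_img_fill_request().
 */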
2465 static void __rbd_osd_setup_zeroout_ops(struct ceph_osd_request *osd_req,
2466 int which)
2468 struct rbd_obj_request *obj_req = osd_req->r_priv;
2469 u16 opcode;
2471 if (rbd_obj_is_entire(obj_req)) {
2472 if (obj_req->num_img_extents) {
2473 if (!(obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED))
2474 osd_req_op_init(osd_req, which++,
2475 CEPH_OSD_OP_CREATE, 0);
2476 opcode = CEPH_OSD_OP_TRUNCATE;
2477 } else {
2478 rbd_assert(obj_req->flags & RBD_OBJ_FLAG_DELETION);
2479 osd_req_op_init(osd_req, which++,
2480 CEPH_OSD_OP_DELETE, 0);
2481 opcode = 0;
2483 } else {
2484 opcode = truncate_or_zero_opcode(obj_req);
2487 if (opcode)
2488 osd_req_op_extent_init(osd_req, which, opcode,
2489 obj_req->ex.oe_off, obj_req->ex.oe_len,
2490 0, 0);
2493 static int rbd_obj_init_zeroout(struct rbd_obj_request *obj_req)
2495 int ret;
2497 /* reverse map the entire object onto the parent */
2498 ret = rbd_obj_calc_img_extents(obj_req, true);
2499 if (ret)
2500 return ret;
2502 if (rbd_obj_copyup_enabled(obj_req))
2503 obj_req->flags |= RBD_OBJ_FLAG_COPYUP_ENABLED;
2504 if (!obj_req->num_img_extents) {
2505 obj_req->flags |= RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT;
2506 if (rbd_obj_is_entire(obj_req))
2507 obj_req->flags |= RBD_OBJ_FLAG_DELETION;
2510 obj_req->write_state = RBD_OBJ_WRITE_START;
2511 return 0;
2514 static int count_write_ops(struct rbd_obj_request *obj_req)
2516 struct rbd_img_request *img_req = obj_req->img_request;
2518 switch (img_req->op_type) {
2519 case OBJ_OP_WRITE:
2520 if (!use_object_map(img_req->rbd_dev) ||
2521 !(obj_req->flags & RBD_OBJ_FLAG_MAY_EXIST))
2522 return 2; /* setallochint + write/writefull */
2524 return 1; /* write/writefull */
2525 case OBJ_OP_DISCARD:
2526 return 1; /* delete/truncate/zero */
2527 case OBJ_OP_ZEROOUT:
2528 if (rbd_obj_is_entire(obj_req) && obj_req->num_img_extents &&
2529 !(obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED))
2530 return 2; /* create + truncate */
2532 return 1; /* delete/truncate/zero */
2533 default:
2534 BUG();
2538 static void rbd_osd_setup_write_ops(struct ceph_osd_request *osd_req,
2539 int which)
2541 struct rbd_obj_request *obj_req = osd_req->r_priv;
2543 switch (obj_req->img_request->op_type) {
2544 case OBJ_OP_WRITE:
2545 __rbd_osd_setup_write_ops(osd_req, which);
2546 break;
2547 case OBJ_OP_DISCARD:
2548 __rbd_osd_setup_discard_ops(osd_req, which);
2549 break;
2550 case OBJ_OP_ZEROOUT:
2551 __rbd_osd_setup_zeroout_ops(osd_req, which);
2552 break;
2553 default:
2554 BUG();
2559 * Prune the list of object requests (adjust offset and/or length, drop
2560 * redundant requests). Prepare object request state machines and image
2561 * request state machine for execution.
2563 static int __rbd_img_fill_request(struct rbd_img_request *img_req)
2565 struct rbd_obj_request *obj_req, *next_obj_req;
2566 int ret;
2568 for_each_obj_request_safe(img_req, obj_req, next_obj_req) {
2569 switch (img_req->op_type) {
2570 case OBJ_OP_READ:
2571 ret = rbd_obj_init_read(obj_req);
2572 break;
2573 case OBJ_OP_WRITE:
2574 ret = rbd_obj_init_write(obj_req);
2575 break;
2576 case OBJ_OP_DISCARD:
2577 ret = rbd_obj_init_discard(obj_req);
2578 break;
2579 case OBJ_OP_ZEROOUT:
2580 ret = rbd_obj_init_zeroout(obj_req);
2581 break;
2582 default:
2583 BUG();
2585 if (ret < 0)
2586 return ret;
2587 if (ret > 0) {
2588 rbd_img_obj_request_del(img_req, obj_req);
2589 continue;
2593 img_req->state = RBD_IMG_START;
2594 return 0;
2597 union rbd_img_fill_iter {
2598 struct ceph_bio_iter bio_iter;
2599 struct ceph_bvec_iter bvec_iter;
2602 struct rbd_img_fill_ctx {
2603 enum obj_request_type pos_type;
2604 union rbd_img_fill_iter *pos;
2605 union rbd_img_fill_iter iter;
2606 ceph_object_extent_fn_t set_pos_fn;
2607 ceph_object_extent_fn_t count_fn;
2608 ceph_object_extent_fn_t copy_fn;
2611 static struct ceph_object_extent *alloc_object_extent(void *arg)
2613 struct rbd_img_request *img_req = arg;
2614 struct rbd_obj_request *obj_req;
2616 obj_req = rbd_obj_request_create();
2617 if (!obj_req)
2618 return NULL;
2620 rbd_img_obj_request_add(img_req, obj_req);
2621 return &obj_req->ex;
2625 * While su != os && sc == 1 is technically not fancy (it's the same
2626 * layout as su == os && sc == 1), we can't use the nocopy path for it
2627 * because ->set_pos_fn() should be called only once per object.
2628 * ceph_file_to_extents() invokes action_fn once per stripe unit, so
2629 * treat su != os && sc == 1 as fancy.
2631 static bool rbd_layout_is_fancy(struct ceph_file_layout *l)
2633 return l->stripe_unit != l->object_size;
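/*
 * Illustrative sketch (not driver code) of how a fancy layout maps a
 * file (image) offset to an object and an offset within it, assuming
 * the usual RADOS striping formula with su dividing os; the helper
 * name is hypothetical.  With su == os and sc == 1 this degenerates
 * to objno = off / os, objoff = off % os.
 */
static void example_map_offset(u64 off, u64 su, u64 os, u64 sc,
			       u64 *objno, u64 *objoff)
{
	u64 bl = off / su;			/* stripe unit number */
	u64 stripeno = bl / sc;			/* stripe number */
	u64 stripepos = bl % sc;		/* object within the set */
	u64 objsetno = stripeno / (os / su);	/* object set number */

	*objno = objsetno * sc + stripepos;
	*objoff = (stripeno % (os / su)) * su + off % su;
}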
2636 static int rbd_img_fill_request_nocopy(struct rbd_img_request *img_req,
2637 struct ceph_file_extent *img_extents,
2638 u32 num_img_extents,
2639 struct rbd_img_fill_ctx *fctx)
2641 u32 i;
2642 int ret;
2644 img_req->data_type = fctx->pos_type;
2647 * Create object requests and set each object request's starting
2648 * position in the provided bio (list) or bio_vec array.
2650 fctx->iter = *fctx->pos;
2651 for (i = 0; i < num_img_extents; i++) {
2652 ret = ceph_file_to_extents(&img_req->rbd_dev->layout,
2653 img_extents[i].fe_off,
2654 img_extents[i].fe_len,
2655 &img_req->object_extents,
2656 alloc_object_extent, img_req,
2657 fctx->set_pos_fn, &fctx->iter);
2658 if (ret)
2659 return ret;
2662 return __rbd_img_fill_request(img_req);
2666 * Map a list of image extents to a list of object extents, create the
2667 * corresponding object requests (normally each to a different object,
2668 * but not always) and add them to @img_req. For each object request,
2669 * set up its data descriptor to point to the corresponding chunk(s) of
2670 * @fctx->pos data buffer.
2672 * Because ceph_file_to_extents() will merge adjacent object extents
2673 * together, each object request's data descriptor may point to multiple
2674 * different chunks of @fctx->pos data buffer.
2676 * @fctx->pos data buffer is assumed to be large enough.
2678 static int rbd_img_fill_request(struct rbd_img_request *img_req,
2679 struct ceph_file_extent *img_extents,
2680 u32 num_img_extents,
2681 struct rbd_img_fill_ctx *fctx)
2683 struct rbd_device *rbd_dev = img_req->rbd_dev;
2684 struct rbd_obj_request *obj_req;
2688 if (fctx->pos_type == OBJ_REQUEST_NODATA ||
2689 !rbd_layout_is_fancy(&rbd_dev->layout))
2690 return rbd_img_fill_request_nocopy(img_req, img_extents,
2691 num_img_extents, fctx);
2693 img_req->data_type = OBJ_REQUEST_OWN_BVECS;
2696 * Create object requests and determine ->bvec_count for each object
2697 * request. Note that ->bvec_count sum over all object requests may
2698 * be greater than the number of bio_vecs in the provided bio (list)
2699 * or bio_vec array because when mapped, those bio_vecs can straddle
2700 * stripe unit boundaries.
2702 fctx->iter = *fctx->pos;
2703 for (i = 0; i < num_img_extents; i++) {
2704 ret = ceph_file_to_extents(&rbd_dev->layout,
2705 img_extents[i].fe_off,
2706 img_extents[i].fe_len,
2707 &img_req->object_extents,
2708 alloc_object_extent, img_req,
2709 fctx->count_fn, &fctx->iter);
2710 if (ret)
2711 return ret;
2714 for_each_obj_request(img_req, obj_req) {
2715 obj_req->bvec_pos.bvecs = kmalloc_array(obj_req->bvec_count,
2716 sizeof(*obj_req->bvec_pos.bvecs),
2717 GFP_NOIO);
2718 if (!obj_req->bvec_pos.bvecs)
2719 return -ENOMEM;
2723 * Fill in each object request's private bio_vec array, splitting and
2724 * rearranging the provided bio_vecs in stripe unit chunks as needed.
2726 fctx->iter = *fctx->pos;
2727 for (i = 0; i < num_img_extents; i++) {
2728 ret = ceph_iterate_extents(&rbd_dev->layout,
2729 img_extents[i].fe_off,
2730 img_extents[i].fe_len,
2731 &img_req->object_extents,
2732 fctx->copy_fn, &fctx->iter);
2733 if (ret)
2734 return ret;
2737 return __rbd_img_fill_request(img_req);
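/*
 * Worked example for the copy path above: with su = 8K and a single
 * 12K bio_vec starting at image offset 0 in a fancy layout, the first
 * 8K maps to one object and the remaining 4K to another, so that one
 * bio_vec is counted (and later copied) as two chunks.  This is why
 * the sum of ->bvec_count over all object requests may exceed the
 * number of bio_vecs provided.
 */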
2740 static int rbd_img_fill_nodata(struct rbd_img_request *img_req,
2741 u64 off, u64 len)
2743 struct ceph_file_extent ex = { off, len };
2744 union rbd_img_fill_iter dummy;
2745 struct rbd_img_fill_ctx fctx = {
2746 .pos_type = OBJ_REQUEST_NODATA,
2747 .pos = &dummy,
2750 return rbd_img_fill_request(img_req, &ex, 1, &fctx);
2753 static void set_bio_pos(struct ceph_object_extent *ex, u32 bytes, void *arg)
2755 struct rbd_obj_request *obj_req =
2756 container_of(ex, struct rbd_obj_request, ex);
2757 struct ceph_bio_iter *it = arg;
2759 dout("%s objno %llu bytes %u\n", __func__, ex->oe_objno, bytes);
2760 obj_req->bio_pos = *it;
2761 ceph_bio_iter_advance(it, bytes);
2764 static void count_bio_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
2766 struct rbd_obj_request *obj_req =
2767 container_of(ex, struct rbd_obj_request, ex);
2768 struct ceph_bio_iter *it = arg;
2770 dout("%s objno %llu bytes %u\n", __func__, ex->oe_objno, bytes);
2771 ceph_bio_iter_advance_step(it, bytes, ({
2772 obj_req->bvec_count++;
2773 }));
2777 static void copy_bio_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
2779 struct rbd_obj_request *obj_req =
2780 container_of(ex, struct rbd_obj_request, ex);
2781 struct ceph_bio_iter *it = arg;
2783 dout("%s objno %llu bytes %u\n", __func__, ex->oe_objno, bytes);
2784 ceph_bio_iter_advance_step(it, bytes, ({
2785 obj_req->bvec_pos.bvecs[obj_req->bvec_idx++] = bv;
2786 obj_req->bvec_pos.iter.bi_size += bv.bv_len;
2787 }));
2790 static int __rbd_img_fill_from_bio(struct rbd_img_request *img_req,
2791 struct ceph_file_extent *img_extents,
2792 u32 num_img_extents,
2793 struct ceph_bio_iter *bio_pos)
2795 struct rbd_img_fill_ctx fctx = {
2796 .pos_type = OBJ_REQUEST_BIO,
2797 .pos = (union rbd_img_fill_iter *)bio_pos,
2798 .set_pos_fn = set_bio_pos,
2799 .count_fn = count_bio_bvecs,
2800 .copy_fn = copy_bio_bvecs,
2803 return rbd_img_fill_request(img_req, img_extents, num_img_extents,
2804 &fctx);
2807 static int rbd_img_fill_from_bio(struct rbd_img_request *img_req,
2808 u64 off, u64 len, struct bio *bio)
2810 struct ceph_file_extent ex = { off, len };
2811 struct ceph_bio_iter it = { .bio = bio, .iter = bio->bi_iter };
2813 return __rbd_img_fill_from_bio(img_req, &ex, 1, &it);
2816 static void set_bvec_pos(struct ceph_object_extent *ex, u32 bytes, void *arg)
2818 struct rbd_obj_request *obj_req =
2819 container_of(ex, struct rbd_obj_request, ex);
2820 struct ceph_bvec_iter *it = arg;
2822 obj_req->bvec_pos = *it;
2823 ceph_bvec_iter_shorten(&obj_req->bvec_pos, bytes);
2824 ceph_bvec_iter_advance(it, bytes);
2827 static void count_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
2829 struct rbd_obj_request *obj_req =
2830 container_of(ex, struct rbd_obj_request, ex);
2831 struct ceph_bvec_iter *it = arg;
2833 ceph_bvec_iter_advance_step(it, bytes, ({
2834 obj_req->bvec_count++;
2835 }));
2838 static void copy_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
2840 struct rbd_obj_request *obj_req =
2841 container_of(ex, struct rbd_obj_request, ex);
2842 struct ceph_bvec_iter *it = arg;
2844 ceph_bvec_iter_advance_step(it, bytes, ({
2845 obj_req->bvec_pos.bvecs[obj_req->bvec_idx++] = bv;
2846 obj_req->bvec_pos.iter.bi_size += bv.bv_len;
2847 }));
2850 static int __rbd_img_fill_from_bvecs(struct rbd_img_request *img_req,
2851 struct ceph_file_extent *img_extents,
2852 u32 num_img_extents,
2853 struct ceph_bvec_iter *bvec_pos)
2855 struct rbd_img_fill_ctx fctx = {
2856 .pos_type = OBJ_REQUEST_BVECS,
2857 .pos = (union rbd_img_fill_iter *)bvec_pos,
2858 .set_pos_fn = set_bvec_pos,
2859 .count_fn = count_bvecs,
2860 .copy_fn = copy_bvecs,
2863 return rbd_img_fill_request(img_req, img_extents, num_img_extents,
2864 &fctx);
2867 static int rbd_img_fill_from_bvecs(struct rbd_img_request *img_req,
2868 struct ceph_file_extent *img_extents,
2869 u32 num_img_extents,
2870 struct bio_vec *bvecs)
2872 struct ceph_bvec_iter it = {
2873 .bvecs = bvecs,
2874 .iter = { .bi_size = ceph_file_extents_bytes(img_extents,
2875 num_img_extents) },
2878 return __rbd_img_fill_from_bvecs(img_req, img_extents, num_img_extents,
2879 &it);
2882 static void rbd_img_handle_request_work(struct work_struct *work)
2884 struct rbd_img_request *img_req =
2885 container_of(work, struct rbd_img_request, work);
2887 rbd_img_handle_request(img_req, img_req->work_result);
2890 static void rbd_img_schedule(struct rbd_img_request *img_req, int result)
2892 INIT_WORK(&img_req->work, rbd_img_handle_request_work);
2893 img_req->work_result = result;
2894 queue_work(rbd_wq, &img_req->work);
2897 static bool rbd_obj_may_exist(struct rbd_obj_request *obj_req)
2899 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2901 if (rbd_object_map_may_exist(rbd_dev, obj_req->ex.oe_objno)) {
2902 obj_req->flags |= RBD_OBJ_FLAG_MAY_EXIST;
2903 return true;
2906 dout("%s %p objno %llu assuming dne\n", __func__, obj_req,
2907 obj_req->ex.oe_objno);
2908 return false;
2911 static int rbd_obj_read_object(struct rbd_obj_request *obj_req)
2913 struct ceph_osd_request *osd_req;
2916 osd_req = __rbd_obj_add_osd_request(obj_req, NULL, 1);
2917 if (IS_ERR(osd_req))
2918 return PTR_ERR(osd_req);
2920 osd_req_op_extent_init(osd_req, 0, CEPH_OSD_OP_READ,
2921 obj_req->ex.oe_off, obj_req->ex.oe_len, 0, 0);
2922 rbd_osd_setup_data(osd_req, 0);
2923 rbd_osd_format_read(osd_req);
2925 ret = ceph_osdc_alloc_messages(osd_req, GFP_NOIO);
2926 if (ret)
2927 return ret;
2929 rbd_osd_submit(osd_req);
2930 return 0;
2933 static int rbd_obj_read_from_parent(struct rbd_obj_request *obj_req)
2935 struct rbd_img_request *img_req = obj_req->img_request;
2936 struct rbd_img_request *child_img_req;
2937 int ret;
2939 child_img_req = rbd_img_request_create(img_req->rbd_dev->parent,
2940 OBJ_OP_READ, NULL);
2941 if (!child_img_req)
2942 return -ENOMEM;
2944 __set_bit(IMG_REQ_CHILD, &child_img_req->flags);
2945 child_img_req->obj_request = obj_req;
2947 if (!rbd_img_is_write(img_req)) {
2948 switch (img_req->data_type) {
2949 case OBJ_REQUEST_BIO:
2950 ret = __rbd_img_fill_from_bio(child_img_req,
2951 obj_req->img_extents,
2952 obj_req->num_img_extents,
2953 &obj_req->bio_pos);
2954 break;
2955 case OBJ_REQUEST_BVECS:
2956 case OBJ_REQUEST_OWN_BVECS:
2957 ret = __rbd_img_fill_from_bvecs(child_img_req,
2958 obj_req->img_extents,
2959 obj_req->num_img_extents,
2960 &obj_req->bvec_pos);
2961 break;
2962 default:
2963 BUG();
2965 } else {
2966 ret = rbd_img_fill_from_bvecs(child_img_req,
2967 obj_req->img_extents,
2968 obj_req->num_img_extents,
2969 obj_req->copyup_bvecs);
2971 if (ret) {
2972 rbd_img_request_put(child_img_req);
2973 return ret;
2976 /* avoid parent chain recursion */
2977 rbd_img_schedule(child_img_req, 0);
2978 return 0;
2981 static bool rbd_obj_advance_read(struct rbd_obj_request *obj_req, int *result)
2983 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2984 int ret;
2986 again:
2987 switch (obj_req->read_state) {
2988 case RBD_OBJ_READ_START:
2989 rbd_assert(!*result);
2991 if (!rbd_obj_may_exist(obj_req)) {
2992 *result = -ENOENT;
2993 obj_req->read_state = RBD_OBJ_READ_OBJECT;
2994 goto again;
2997 ret = rbd_obj_read_object(obj_req);
2998 if (ret) {
2999 *result = ret;
3000 return true;
3002 obj_req->read_state = RBD_OBJ_READ_OBJECT;
3003 return false;
3004 case RBD_OBJ_READ_OBJECT:
3005 if (*result == -ENOENT && rbd_dev->parent_overlap) {
3006 /* reverse map this object extent onto the parent */
3007 ret = rbd_obj_calc_img_extents(obj_req, false);
3008 if (ret) {
3009 *result = ret;
3010 return true;
3012 if (obj_req->num_img_extents) {
3013 ret = rbd_obj_read_from_parent(obj_req);
3014 if (ret) {
3015 *result = ret;
3016 return true;
3018 obj_req->read_state = RBD_OBJ_READ_PARENT;
3019 return false;
3024 * -ENOENT means a hole in the image -- zero-fill the entire
3025 * length of the request. A short read also implies zero-fill
3026 * to the end of the request.
3028 if (*result == -ENOENT) {
3029 rbd_obj_zero_range(obj_req, 0, obj_req->ex.oe_len);
3030 *result = 0;
3031 } else if (*result >= 0) {
3032 if (*result < obj_req->ex.oe_len)
3033 rbd_obj_zero_range(obj_req, *result,
3034 obj_req->ex.oe_len - *result);
3035 else
3036 rbd_assert(*result == obj_req->ex.oe_len);
3037 *result = 0;
3039 return true;
3040 case RBD_OBJ_READ_PARENT:
3042 * The parent image is read only up to the overlap -- zero-fill
3043 * from the overlap to the end of the request.
3045 if (!*result) {
3046 u32 obj_overlap = rbd_obj_img_extents_bytes(obj_req);
3048 if (obj_overlap < obj_req->ex.oe_len)
3049 rbd_obj_zero_range(obj_req, obj_overlap,
3050 obj_req->ex.oe_len - obj_overlap);
3052 return true;
3053 default:
3054 BUG();
3058 static bool rbd_obj_write_is_noop(struct rbd_obj_request *obj_req)
3060 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
3062 if (rbd_object_map_may_exist(rbd_dev, obj_req->ex.oe_objno))
3063 obj_req->flags |= RBD_OBJ_FLAG_MAY_EXIST;
3065 if (!(obj_req->flags & RBD_OBJ_FLAG_MAY_EXIST) &&
3066 (obj_req->flags & RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT)) {
3067 dout("%s %p noop for nonexistent\n", __func__, obj_req);
3068 return true;
3071 return false;
3075 * Return:
3076 *   0 - object map update sent
3077 *   1 - object map update isn't needed
3078 *  <0 - error
3080 static int rbd_obj_write_pre_object_map(struct rbd_obj_request *obj_req)
3082 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
3083 u8 new_state;
3085 if (!(rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP))
3086 return 1;
3088 if (obj_req->flags & RBD_OBJ_FLAG_DELETION)
3089 new_state = OBJECT_PENDING;
3091 new_state = OBJECT_EXISTS;
3093 return rbd_object_map_update(obj_req, CEPH_NOSNAP, new_state, NULL);
3096 static int rbd_obj_write_object(struct rbd_obj_request *obj_req)
3098 struct ceph_osd_request *osd_req;
3099 int num_ops = count_write_ops(obj_req);
3100 int which = 0;
3101 int ret;
3103 if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED)
3104 num_ops++; /* stat */
3106 osd_req = rbd_obj_add_osd_request(obj_req, num_ops);
3107 if (IS_ERR(osd_req))
3108 return PTR_ERR(osd_req);
3110 if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED) {
3111 ret = rbd_osd_setup_stat(osd_req, which++);
3112 if (ret)
3113 return ret;
3116 rbd_osd_setup_write_ops(osd_req, which);
3117 rbd_osd_format_write(osd_req);
3119 ret = ceph_osdc_alloc_messages(osd_req, GFP_NOIO);
3120 if (ret)
3121 return ret;
3123 rbd_osd_submit(osd_req);
3124 return 0;
3128 * copyup_bvecs pages are never highmem pages
3130 static bool is_zero_bvecs(struct bio_vec *bvecs, u32 bytes)
3132 struct ceph_bvec_iter it = {
3133 .bvecs = bvecs,
3134 .iter = { .bi_size = bytes },
3137 ceph_bvec_iter_advance_step(&it, bytes, ({
3138 if (memchr_inv(page_address(bv.bv_page) + bv.bv_offset, 0,
3139 bv.bv_len))
3140 return false;
3141 }));
3142 return true;
3145 #define MODS_ONLY U32_MAX
3147 static int rbd_obj_copyup_empty_snapc(struct rbd_obj_request *obj_req,
3148 u32 bytes)
3150 struct ceph_osd_request *osd_req;
3153 dout("%s obj_req %p bytes %u\n", __func__, obj_req, bytes);
3154 rbd_assert(bytes > 0 && bytes != MODS_ONLY);
3156 osd_req = __rbd_obj_add_osd_request(obj_req, &rbd_empty_snapc, 1);
3157 if (IS_ERR(osd_req))
3158 return PTR_ERR(osd_req);
3160 ret = rbd_osd_setup_copyup(osd_req, 0, bytes);
3161 if (ret)
3162 return ret;
3164 rbd_osd_format_write(osd_req);
3166 ret = ceph_osdc_alloc_messages(osd_req, GFP_NOIO);
3167 if (ret)
3168 return ret;
3170 rbd_osd_submit(osd_req);
3171 return 0;
3174 static int rbd_obj_copyup_current_snapc(struct rbd_obj_request *obj_req,
3175 u32 bytes)
3177 struct ceph_osd_request *osd_req;
3178 int num_ops = count_write_ops(obj_req);
3179 int which = 0;
3180 int ret;
3182 dout("%s obj_req %p bytes %u\n", __func__, obj_req, bytes);
3184 if (bytes != MODS_ONLY)
3185 num_ops++; /* copyup */
3187 osd_req = rbd_obj_add_osd_request(obj_req, num_ops);
3188 if (IS_ERR(osd_req))
3189 return PTR_ERR(osd_req);
3191 if (bytes != MODS_ONLY) {
3192 ret = rbd_osd_setup_copyup(osd_req, which++, bytes);
3193 if (ret)
3194 return ret;
3197 rbd_osd_setup_write_ops(osd_req, which);
3198 rbd_osd_format_write(osd_req);
3200 ret = ceph_osdc_alloc_messages(osd_req, GFP_NOIO);
3201 if (ret)
3202 return ret;
3204 rbd_osd_submit(osd_req);
3205 return 0;
3208 static int setup_copyup_bvecs(struct rbd_obj_request *obj_req, u64 obj_overlap)
3212 rbd_assert(!obj_req->copyup_bvecs);
3213 obj_req->copyup_bvec_count = calc_pages_for(0, obj_overlap);
3214 obj_req->copyup_bvecs = kcalloc(obj_req->copyup_bvec_count,
3215 sizeof(*obj_req->copyup_bvecs),
3216 GFP_NOIO);
3217 if (!obj_req->copyup_bvecs)
3218 return -ENOMEM;
3220 for (i = 0; i < obj_req->copyup_bvec_count; i++) {
3221 unsigned int len = min(obj_overlap, (u64)PAGE_SIZE);
3223 obj_req->copyup_bvecs[i].bv_page = alloc_page(GFP_NOIO);
3224 if (!obj_req->copyup_bvecs[i].bv_page)
3225 return -ENOMEM;
3227 obj_req->copyup_bvecs[i].bv_offset = 0;
3228 obj_req->copyup_bvecs[i].bv_len = len;
3229 obj_overlap -= len;
3232 rbd_assert(!obj_overlap);
3233 return 0;
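/*
 * Worked example: for obj_overlap = 10240, calc_pages_for(0, 10240)
 * yields 3 bvecs, sized 4096, 4096 and 2048 on 4K pages; obj_overlap
 * is decremented by each bv_len, so the final rbd_assert(!obj_overlap)
 * confirms the whole overlap was covered.
 */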
3237 * The target object doesn't exist. Read the data for the entire
3238 * target object up to the overlap point (if any) from the parent,
3239 * so we can use it for a copyup.
3241 static int rbd_obj_copyup_read_parent(struct rbd_obj_request *obj_req)
3243 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
3246 rbd_assert(obj_req->num_img_extents);
3247 prune_extents(obj_req->img_extents, &obj_req->num_img_extents,
3248 rbd_dev->parent_overlap);
3249 if (!obj_req->num_img_extents) {
3251 * The overlap has become 0 (most likely because the
3252 * image has been flattened). Re-submit the original write
3253 * request -- pass MODS_ONLY since the copyup isn't needed
3254 * anymore.
3256 return rbd_obj_copyup_current_snapc(obj_req, MODS_ONLY);
3259 ret = setup_copyup_bvecs(obj_req, rbd_obj_img_extents_bytes(obj_req));
3260 if (ret)
3261 return ret;
3263 return rbd_obj_read_from_parent(obj_req);
3266 static void rbd_obj_copyup_object_maps(struct rbd_obj_request *obj_req)
3268 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
3269 struct ceph_snap_context *snapc = obj_req->img_request->snapc;
3274 rbd_assert(!obj_req->pending.result && !obj_req->pending.num_pending);
3276 if (!(rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP))
3277 return;
3279 if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ZEROS)
3280 return;
3282 for (i = 0; i < snapc->num_snaps; i++) {
3283 if ((rbd_dev->header.features & RBD_FEATURE_FAST_DIFF) &&
3284 i + 1 < snapc->num_snaps)
3285 new_state = OBJECT_EXISTS_CLEAN;
3286 else
3287 new_state = OBJECT_EXISTS;
3289 ret = rbd_object_map_update(obj_req, snapc->snaps[i],
3290 new_state, NULL);
3291 if (ret < 0) {
3292 obj_req->pending.result = ret;
3293 return;
3296 rbd_assert(!ret);
3297 obj_req->pending.num_pending++;
3301 static void rbd_obj_copyup_write_object(struct rbd_obj_request *obj_req)
3303 u32 bytes = rbd_obj_img_extents_bytes(obj_req);
3306 rbd_assert(!obj_req->pending.result && !obj_req->pending.num_pending);
3309 * Only send non-zero copyup data to save some I/O and network
3310 * bandwidth -- zero copyup data is equivalent to the object not
3311 * existing.
3313 if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ZEROS)
3314 bytes = 0;
3316 if (obj_req->img_request->snapc->num_snaps && bytes > 0) {
3318 * Send a copyup request with an empty snapshot context to
3319 * deep-copyup the object through all existing snapshots.
3320 * A second request with the current snapshot context will be
3321 * sent for the actual modification.
3323 ret = rbd_obj_copyup_empty_snapc(obj_req, bytes);
3324 if (ret) {
3325 obj_req->pending.result = ret;
3326 return;
3329 obj_req->pending.num_pending++;
3330 bytes = MODS_ONLY;
3333 ret = rbd_obj_copyup_current_snapc(obj_req, bytes);
3334 if (ret) {
3335 obj_req->pending.result = ret;
3336 return;
3339 obj_req->pending.num_pending++;
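/*
 * Summary sketch of the resulting OSD requests: with snapshots present
 * and non-zero parent data, request 1 is a copyup with an empty
 * snapshot context (deep-copyup through all existing snapshots) and
 * request 2 carries the actual modification ops under the current
 * snapshot context with bytes == MODS_ONLY, so the copyup data isn't
 * resent.  Without snapshots, a single request carries both the copyup
 * and the modification ops.
 */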
3342 static bool rbd_obj_advance_copyup(struct rbd_obj_request *obj_req, int *result)
3344 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
3345 int ret;
3347 again:
3348 switch (obj_req->copyup_state) {
3349 case RBD_OBJ_COPYUP_START:
3350 rbd_assert(!*result);
3352 ret = rbd_obj_copyup_read_parent(obj_req);
3353 if (ret) {
3354 *result = ret;
3355 return true;
3357 if (obj_req->num_img_extents)
3358 obj_req->copyup_state = RBD_OBJ_COPYUP_READ_PARENT;
3359 else
3360 obj_req->copyup_state = RBD_OBJ_COPYUP_WRITE_OBJECT;
3361 return false;
3362 case RBD_OBJ_COPYUP_READ_PARENT:
3363 if (*result)
3364 return true;
3366 if (is_zero_bvecs(obj_req->copyup_bvecs,
3367 rbd_obj_img_extents_bytes(obj_req))) {
3368 dout("%s %p detected zeros\n", __func__, obj_req);
3369 obj_req->flags |= RBD_OBJ_FLAG_COPYUP_ZEROS;
3372 rbd_obj_copyup_object_maps(obj_req);
3373 if (!obj_req->pending.num_pending) {
3374 *result = obj_req->pending.result;
3375 obj_req->copyup_state = RBD_OBJ_COPYUP_OBJECT_MAPS;
3376 goto again;
3378 obj_req->copyup_state = __RBD_OBJ_COPYUP_OBJECT_MAPS;
3379 return false;
3380 case __RBD_OBJ_COPYUP_OBJECT_MAPS:
3381 if (!pending_result_dec(&obj_req->pending, result))
3382 return false;
3383 /* fall through */
3384 case RBD_OBJ_COPYUP_OBJECT_MAPS:
3385 if (*result) {
3386 rbd_warn(rbd_dev, "snap object map update failed: %d",
3387 *result);
3388 return true;
3391 rbd_obj_copyup_write_object(obj_req);
3392 if (!obj_req->pending.num_pending) {
3393 *result = obj_req->pending.result;
3394 obj_req->copyup_state = RBD_OBJ_COPYUP_WRITE_OBJECT;
3395 goto again;
3397 obj_req->copyup_state = __RBD_OBJ_COPYUP_WRITE_OBJECT;
3398 return false;
3399 case __RBD_OBJ_COPYUP_WRITE_OBJECT:
3400 if (!pending_result_dec(&obj_req->pending, result))
3401 return false;
3402 /* fall through */
3403 case RBD_OBJ_COPYUP_WRITE_OBJECT:
3404 return true;
3405 default:
3406 BUG();
3411 * Return:
3412 *   0 - object map update sent
3413 *   1 - object map update isn't needed
3414 *  <0 - error
3416 static int rbd_obj_write_post_object_map(struct rbd_obj_request *obj_req)
3418 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
3419 u8 current_state = OBJECT_PENDING;
3421 if (!(rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP))
3422 return 1;
3424 if (!(obj_req->flags & RBD_OBJ_FLAG_DELETION))
3425 return 1;
3427 return rbd_object_map_update(obj_req, CEPH_NOSNAP, OBJECT_NONEXISTENT,
3428 &current_state);
3431 static bool rbd_obj_advance_write(struct rbd_obj_request *obj_req, int *result)
3433 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
3434 int ret;
3436 again:
3437 switch (obj_req->write_state) {
3438 case RBD_OBJ_WRITE_START:
3439 rbd_assert(!*result);
3441 if (rbd_obj_write_is_noop(obj_req))
3442 return true;
3444 ret = rbd_obj_write_pre_object_map(obj_req);
3445 if (ret < 0) {
3446 *result = ret;
3447 return true;
3449 obj_req->write_state = RBD_OBJ_WRITE_PRE_OBJECT_MAP;
3450 if (ret > 0)
3451 goto again;
3452 return false;
3453 case RBD_OBJ_WRITE_PRE_OBJECT_MAP:
3454 if (*result) {
3455 rbd_warn(rbd_dev, "pre object map update failed: %d",
3456 *result);
3457 return true;
3459 ret = rbd_obj_write_object(obj_req);
3460 if (ret) {
3461 *result = ret;
3462 return true;
3464 obj_req->write_state = RBD_OBJ_WRITE_OBJECT;
3465 return false;
3466 case RBD_OBJ_WRITE_OBJECT:
3467 if (*result == -ENOENT) {
3468 if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED) {
3469 *result = 0;
3470 obj_req->copyup_state = RBD_OBJ_COPYUP_START;
3471 obj_req->write_state = __RBD_OBJ_WRITE_COPYUP;
3472 goto again;
3475 * On a non-existent object:
3476 * delete - -ENOENT, truncate/zero - 0
3478 if (obj_req->flags & RBD_OBJ_FLAG_DELETION)
3479 *result = 0;
3481 if (*result)
3482 return true;
3484 obj_req->write_state = RBD_OBJ_WRITE_COPYUP;
3485 goto again;
3486 case __RBD_OBJ_WRITE_COPYUP:
3487 if (!rbd_obj_advance_copyup(obj_req, result))
3488 return false;
3489 /* fall through */
3490 case RBD_OBJ_WRITE_COPYUP:
3491 if (*result) {
3492 rbd_warn(rbd_dev, "copyup failed: %d", *result);
3493 return true;
3495 ret = rbd_obj_write_post_object_map(obj_req);
3496 if (ret < 0) {
3497 *result = ret;
3498 return true;
3500 obj_req->write_state = RBD_OBJ_WRITE_POST_OBJECT_MAP;
3501 if (ret > 0)
3502 goto again;
3503 return false;
3504 case RBD_OBJ_WRITE_POST_OBJECT_MAP:
3505 if (*result)
3506 rbd_warn(rbd_dev, "post object map update failed: %d",
3507 *result);
3508 return true;
3509 default:
3510 BUG();
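/*
 * Sketch of the write state machine driven above:
 *
 *   RBD_OBJ_WRITE_START
 *     -> RBD_OBJ_WRITE_PRE_OBJECT_MAP   (optional object map update)
 *     -> RBD_OBJ_WRITE_OBJECT           (the write itself)
 *     -> [__]RBD_OBJ_WRITE_COPYUP       (only on -ENOENT with copyup
 *                                        enabled)
 *     -> RBD_OBJ_WRITE_POST_OBJECT_MAP  (optional, e.g. after delete)
 *
 * Each step either completes inline (goto again) or returns false and
 * waits for an OSD reply to re-enter rbd_obj_advance_write() with the
 * outcome in *result.
 */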
3515 * Return true if @obj_req is completed.
3517 static bool __rbd_obj_handle_request(struct rbd_obj_request *obj_req,
3518 int *result)
3520 struct rbd_img_request *img_req = obj_req->img_request;
3521 struct rbd_device *rbd_dev = img_req->rbd_dev;
3524 mutex_lock(&obj_req->state_mutex);
3525 if (!rbd_img_is_write(img_req))
3526 done = rbd_obj_advance_read(obj_req, result);
3527 else
3528 done = rbd_obj_advance_write(obj_req, result);
3529 mutex_unlock(&obj_req->state_mutex);
3531 if (done && *result) {
3532 rbd_assert(*result < 0);
3533 rbd_warn(rbd_dev, "%s at objno %llu %llu~%llu result %d",
3534 obj_op_name(img_req->op_type), obj_req->ex.oe_objno,
3535 obj_req->ex.oe_off, obj_req->ex.oe_len, *result);
3537 return done;
3541 * This is open-coded in rbd_img_handle_request() to avoid parent chain
3542 * recursion.
3544 static void rbd_obj_handle_request(struct rbd_obj_request *obj_req, int result)
3546 if (__rbd_obj_handle_request(obj_req, &result))
3547 rbd_img_handle_request(obj_req->img_request, result);
3550 static bool need_exclusive_lock(struct rbd_img_request *img_req)
3552 struct rbd_device *rbd_dev = img_req->rbd_dev;
3554 if (!(rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK))
3555 return false;
3557 if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
3558 return false;
3560 rbd_assert(!test_bit(IMG_REQ_CHILD, &img_req->flags));
3561 if (rbd_dev->opts->lock_on_read ||
3562 (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP))
3563 return true;
3565 return rbd_img_is_write(img_req);
3568 static bool rbd_lock_add_request(struct rbd_img_request *img_req)
3570 struct rbd_device *rbd_dev = img_req->rbd_dev;
3573 lockdep_assert_held(&rbd_dev->lock_rwsem);
3574 locked = rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED;
3575 spin_lock(&rbd_dev->lock_lists_lock);
3576 rbd_assert(list_empty(&img_req->lock_item));
3577 if (!locked)
3578 list_add_tail(&img_req->lock_item, &rbd_dev->acquiring_list);
3579 else
3580 list_add_tail(&img_req->lock_item, &rbd_dev->running_list);
3581 spin_unlock(&rbd_dev->lock_lists_lock);
3583 return locked;
3585 static void rbd_lock_del_request(struct rbd_img_request *img_req)
3587 struct rbd_device *rbd_dev = img_req->rbd_dev;
3590 lockdep_assert_held(&rbd_dev->lock_rwsem);
3591 spin_lock(&rbd_dev->lock_lists_lock);
3592 rbd_assert(!list_empty(&img_req->lock_item));
3593 list_del_init(&img_req->lock_item);
3594 need_wakeup = (rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING &&
3595 list_empty(&rbd_dev->running_list));
3596 spin_unlock(&rbd_dev->lock_lists_lock);
3597 if (need_wakeup)
3598 complete(&rbd_dev->releasing_wait);
3601 static int rbd_img_exclusive_lock(struct rbd_img_request *img_req)
3603 struct rbd_device *rbd_dev = img_req->rbd_dev;
3605 if (!need_exclusive_lock(img_req))
3606 return 1;
3608 if (rbd_lock_add_request(img_req))
3609 return 1;
3611 if (rbd_dev->opts->exclusive) {
3612 WARN_ON(1); /* lock got released? */
3613 return -EROFS;
3617 * Note the use of mod_delayed_work() in rbd_acquire_lock()
3618 * and cancel_delayed_work() in wake_lock_waiters().
3620 dout("%s rbd_dev %p queueing lock_dwork\n", __func__, rbd_dev);
3621 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
3622 return 0;
3625 static void rbd_img_object_requests(struct rbd_img_request *img_req)
3627 struct rbd_obj_request *obj_req;
3629 rbd_assert(!img_req->pending.result && !img_req->pending.num_pending);
3631 for_each_obj_request(img_req, obj_req) {
3632 int result = 0;
3634 if (__rbd_obj_handle_request(obj_req, &result)) {
3635 if (result) {
3636 img_req->pending.result = result;
3637 return;
3639 } else {
3640 img_req->pending.num_pending++;
3645 static bool rbd_img_advance(struct rbd_img_request *img_req, int *result)
3647 struct rbd_device *rbd_dev = img_req->rbd_dev;
3648 int ret;
3650 again:
3651 switch (img_req->state) {
3652 case RBD_IMG_START:
3653 rbd_assert(!*result);
3655 ret = rbd_img_exclusive_lock(img_req);
3656 if (ret < 0) {
3657 *result = ret;
3658 return true;
3660 img_req->state = RBD_IMG_EXCLUSIVE_LOCK;
3661 if (ret > 0)
3662 goto again;
3663 return false;
3664 case RBD_IMG_EXCLUSIVE_LOCK:
3665 if (*result)
3666 return true;
3668 rbd_assert(!need_exclusive_lock(img_req) ||
3669 __rbd_is_lock_owner(rbd_dev));
3671 rbd_img_object_requests(img_req);
3672 if (!img_req->pending.num_pending) {
3673 *result = img_req->pending.result;
3674 img_req->state = RBD_IMG_OBJECT_REQUESTS;
3675 goto again;
3677 img_req->state = __RBD_IMG_OBJECT_REQUESTS;
3678 return false;
3679 case __RBD_IMG_OBJECT_REQUESTS:
3680 if (!pending_result_dec(&img_req->pending, result))
3681 return false;
3682 /* fall through */
3683 case RBD_IMG_OBJECT_REQUESTS:
3684 return true;
3685 default:
3686 BUG();
3691 * Return true if @img_req is completed.
3693 static bool __rbd_img_handle_request(struct rbd_img_request *img_req,
3694 int *result)
3696 struct rbd_device *rbd_dev = img_req->rbd_dev;
3697 bool done;
3699 if (need_exclusive_lock(img_req)) {
3700 down_read(&rbd_dev->lock_rwsem);
3701 mutex_lock(&img_req->state_mutex);
3702 done = rbd_img_advance(img_req, result);
3703 if (done)
3704 rbd_lock_del_request(img_req);
3705 mutex_unlock(&img_req->state_mutex);
3706 up_read(&rbd_dev->lock_rwsem);
3707 } else {
3708 mutex_lock(&img_req->state_mutex);
3709 done = rbd_img_advance(img_req, result);
3710 mutex_unlock(&img_req->state_mutex);
3713 if (done && *result) {
3714 rbd_assert(*result < 0);
3715 rbd_warn(rbd_dev, "%s%s result %d",
3716 test_bit(IMG_REQ_CHILD, &img_req->flags) ? "child " : "",
3717 obj_op_name(img_req->op_type), *result);
3719 return done;
3722 static void rbd_img_handle_request(struct rbd_img_request *img_req, int result)
3724 again:
3725 if (!__rbd_img_handle_request(img_req, &result))
3726 return;
3728 if (test_bit(IMG_REQ_CHILD, &img_req->flags)) {
3729 struct rbd_obj_request *obj_req = img_req->obj_request;
3731 rbd_img_request_put(img_req);
3732 if (__rbd_obj_handle_request(obj_req, &result)) {
3733 img_req = obj_req->img_request;
3734 goto again;
3736 } else {
3737 struct request *rq = img_req->rq;
3739 rbd_img_request_put(img_req);
3740 blk_mq_end_request(rq, errno_to_blk_status(result));
3744 static const struct rbd_client_id rbd_empty_cid;
3746 static bool rbd_cid_equal(const struct rbd_client_id *lhs,
3747 const struct rbd_client_id *rhs)
3749 return lhs->gid == rhs->gid && lhs->handle == rhs->handle;
3752 static struct rbd_client_id rbd_get_cid(struct rbd_device *rbd_dev)
3754 struct rbd_client_id cid;
3756 mutex_lock(&rbd_dev->watch_mutex);
3757 cid.gid = ceph_client_gid(rbd_dev->rbd_client->client);
3758 cid.handle = rbd_dev->watch_cookie;
3759 mutex_unlock(&rbd_dev->watch_mutex);
3761 return cid;
3764 * lock_rwsem must be held for write
3766 static void rbd_set_owner_cid(struct rbd_device *rbd_dev,
3767 const struct rbd_client_id *cid)
3769 dout("%s rbd_dev %p %llu-%llu -> %llu-%llu\n", __func__, rbd_dev,
3770 rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle,
3771 cid->gid, cid->handle);
3772 rbd_dev->owner_cid = *cid; /* struct */
3775 static void format_lock_cookie(struct rbd_device *rbd_dev, char *buf)
3777 mutex_lock(&rbd_dev->watch_mutex);
3778 sprintf(buf, "%s %llu", RBD_LOCK_COOKIE_PREFIX, rbd_dev->watch_cookie);
3779 mutex_unlock(&rbd_dev->watch_mutex);
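/*
 * Note: RBD_LOCK_COOKIE_PREFIX is "auto" (defined earlier in this
 * file), so a generated cookie looks like "auto 123456789" (the number
 * being the watch cookie, i.e. the linger id; the value here is only
 * an example).  find_watcher() parses the cookie back with sscanf() to
 * match a locker against a live watch.
 */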
3782 static void __rbd_lock(struct rbd_device *rbd_dev, const char *cookie)
3784 struct rbd_client_id cid = rbd_get_cid(rbd_dev);
3786 rbd_dev->lock_state = RBD_LOCK_STATE_LOCKED;
3787 strcpy(rbd_dev->lock_cookie, cookie);
3788 rbd_set_owner_cid(rbd_dev, &cid);
3789 queue_work(rbd_dev->task_wq, &rbd_dev->acquired_lock_work);
3793 * lock_rwsem must be held for write
3795 static int rbd_lock(struct rbd_device *rbd_dev)
3797 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3798 char cookie[32];
3799 int ret;
3801 WARN_ON(__rbd_is_lock_owner(rbd_dev) ||
3802 rbd_dev->lock_cookie[0] != '\0');
3804 format_lock_cookie(rbd_dev, cookie);
3805 ret = ceph_cls_lock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
3806 RBD_LOCK_NAME, CEPH_CLS_LOCK_EXCLUSIVE, cookie,
3807 RBD_LOCK_TAG, "", 0);
3808 if (ret)
3809 return ret;
3811 __rbd_lock(rbd_dev, cookie);
3812 return 0;
3816 * lock_rwsem must be held for write
3818 static void rbd_unlock(struct rbd_device *rbd_dev)
3820 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3823 WARN_ON(!__rbd_is_lock_owner(rbd_dev) ||
3824 rbd_dev->lock_cookie[0] == '\0');
3826 ret = ceph_cls_unlock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
3827 RBD_LOCK_NAME, rbd_dev->lock_cookie);
3828 if (ret && ret != -ENOENT)
3829 rbd_warn(rbd_dev, "failed to unlock header: %d", ret);
3831 /* treat errors as the image is unlocked */
3832 rbd_dev->lock_state = RBD_LOCK_STATE_UNLOCKED;
3833 rbd_dev->lock_cookie[0] = '\0';
3834 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
3835 queue_work(rbd_dev->task_wq, &rbd_dev->released_lock_work);
3838 static int __rbd_notify_op_lock(struct rbd_device *rbd_dev,
3839 enum rbd_notify_op notify_op,
3840 struct page ***preply_pages,
3841 size_t *preply_len)
3843 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3844 struct rbd_client_id cid = rbd_get_cid(rbd_dev);
3845 char buf[4 + 8 + 8 + CEPH_ENCODING_START_BLK_LEN];
3846 int buf_size = sizeof(buf);
3847 void *p = buf;
3849 dout("%s rbd_dev %p notify_op %d\n", __func__, rbd_dev, notify_op);
3851 /* encode *LockPayload NotifyMessage (op + ClientId) */
3852 ceph_start_encoding(&p, 2, 1, buf_size - CEPH_ENCODING_START_BLK_LEN);
3853 ceph_encode_32(&p, notify_op);
3854 ceph_encode_64(&p, cid.gid);
3855 ceph_encode_64(&p, cid.handle);
3857 return ceph_osdc_notify(osdc, &rbd_dev->header_oid,
3858 &rbd_dev->header_oloc, buf, buf_size,
3859 RBD_NOTIFY_TIMEOUT, preply_pages, preply_len);
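/*
 * Wire-format sketch of the payload built above, assuming the standard
 * ceph start-encoding block (CEPH_ENCODING_START_BLK_LEN == 6 bytes):
 *
 *   u8   struct_v = 2
 *   u8   struct_compat = 1
 *   le32 payload length = 4 + 8 + 8
 *   le32 notify_op
 *   le64 gid      \_ ClientId
 *   le64 handle   /
 */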
3862 static void rbd_notify_op_lock(struct rbd_device *rbd_dev,
3863 enum rbd_notify_op notify_op)
3865 struct page **reply_pages;
3866 size_t reply_len;
3868 __rbd_notify_op_lock(rbd_dev, notify_op, &reply_pages, &reply_len);
3869 ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len));
3872 static void rbd_notify_acquired_lock(struct work_struct *work)
3874 struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
3875 acquired_lock_work);
3877 rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_ACQUIRED_LOCK);
3880 static void rbd_notify_released_lock(struct work_struct *work)
3882 struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
3883 released_lock_work);
3885 rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_RELEASED_LOCK);
3888 static int rbd_request_lock(struct rbd_device *rbd_dev)
3890 struct page **reply_pages;
3891 size_t reply_len;
3892 bool lock_owner_responded = false;
3893 int ret;
3895 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3897 ret = __rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_REQUEST_LOCK,
3898 &reply_pages, &reply_len);
3899 if (ret && ret != -ETIMEDOUT) {
3900 rbd_warn(rbd_dev, "failed to request lock: %d", ret);
3901 goto out;
3904 if (reply_len > 0 && reply_len <= PAGE_SIZE) {
3905 void *p = page_address(reply_pages[0]);
3906 void *const end = p + reply_len;
3907 u32 n;
3909 ceph_decode_32_safe(&p, end, n, e_inval); /* num_acks */
3910 while (n--) {
3911 u8 struct_v;
3912 u32 len;
3914 ceph_decode_need(&p, end, 8 + 8, e_inval);
3915 p += 8 + 8; /* skip gid and cookie */
3917 ceph_decode_32_safe(&p, end, len, e_inval);
3918 if (!len)
3919 continue;
3921 if (lock_owner_responded) {
3922 rbd_warn(rbd_dev,
3923 "duplicate lock owners detected");
3924 ret = -EIO;
3925 goto out;
3928 lock_owner_responded = true;
3929 ret = ceph_start_decoding(&p, end, 1, "ResponseMessage",
3930 &struct_v, &len);
3931 if (ret) {
3932 rbd_warn(rbd_dev,
3933 "failed to decode ResponseMessage: %d",
3934 ret);
3935 goto e_inval;
3938 ret = ceph_decode_32(&p);
3942 if (!lock_owner_responded) {
3943 rbd_warn(rbd_dev, "no lock owners detected");
3944 ret = -ETIMEDOUT;
3947 out:
3948 ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len));
3949 return ret;
3951 e_inval:
3952 ret = -EINVAL;
3953 goto out;
3957 * Either image request state machine(s) or rbd_add_acquire_lock()
3958 * (i.e. "rbd map").
3960 static void wake_lock_waiters(struct rbd_device *rbd_dev, int result)
3962 struct rbd_img_request *img_req;
3964 dout("%s rbd_dev %p result %d\n", __func__, rbd_dev, result);
3965 lockdep_assert_held_write(&rbd_dev->lock_rwsem);
3967 cancel_delayed_work(&rbd_dev->lock_dwork);
3968 if (!completion_done(&rbd_dev->acquire_wait)) {
3969 rbd_assert(list_empty(&rbd_dev->acquiring_list) &&
3970 list_empty(&rbd_dev->running_list));
3971 rbd_dev->acquire_err = result;
3972 complete_all(&rbd_dev->acquire_wait);
3973 return;
3976 list_for_each_entry(img_req, &rbd_dev->acquiring_list, lock_item) {
3977 mutex_lock(&img_req->state_mutex);
3978 rbd_assert(img_req->state == RBD_IMG_EXCLUSIVE_LOCK);
3979 rbd_img_schedule(img_req, result);
3980 mutex_unlock(&img_req->state_mutex);
3983 list_splice_tail_init(&rbd_dev->acquiring_list, &rbd_dev->running_list);
3986 static int get_lock_owner_info(struct rbd_device *rbd_dev,
3987 struct ceph_locker **lockers, u32 *num_lockers)
3989 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3990 u8 lock_type;
3991 char *lock_tag;
3992 int ret;
3994 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3996 ret = ceph_cls_lock_info(osdc, &rbd_dev->header_oid,
3997 &rbd_dev->header_oloc, RBD_LOCK_NAME,
3998 &lock_type, &lock_tag, lockers, num_lockers);
3999 if (ret)
4000 return ret;
4002 if (*num_lockers == 0) {
4003 dout("%s rbd_dev %p no lockers detected\n", __func__, rbd_dev);
4004 goto out;
4007 if (strcmp(lock_tag, RBD_LOCK_TAG)) {
4008 rbd_warn(rbd_dev, "locked by external mechanism, tag %s",
4009 lock_tag);
4010 ret = -EBUSY;
4011 goto out;
4014 if (lock_type == CEPH_CLS_LOCK_SHARED) {
4015 rbd_warn(rbd_dev, "shared lock type detected");
4016 ret = -EBUSY;
4017 goto out;
4020 if (strncmp((*lockers)[0].id.cookie, RBD_LOCK_COOKIE_PREFIX,
4021 strlen(RBD_LOCK_COOKIE_PREFIX))) {
4022 rbd_warn(rbd_dev, "locked by external mechanism, cookie %s",
4023 (*lockers)[0].id.cookie);
4024 ret = -EBUSY;
4025 goto out;
4028 out:
4029 kfree(lock_tag);
4030 return ret;
4033 static int find_watcher(struct rbd_device *rbd_dev,
4034 const struct ceph_locker *locker)
4036 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4037 struct ceph_watch_item *watchers;
4043 ret = ceph_osdc_list_watchers(osdc, &rbd_dev->header_oid,
4044 &rbd_dev->header_oloc, &watchers,
4045 &num_watchers);
4046 if (ret)
4047 return ret;
4049 sscanf(locker->id.cookie, RBD_LOCK_COOKIE_PREFIX " %llu", &cookie);
4050 for (i = 0; i < num_watchers; i++) {
4051 if (!memcmp(&watchers[i].addr, &locker->info.addr,
4052 sizeof(locker->info.addr)) &&
4053 watchers[i].cookie == cookie) {
4054 struct rbd_client_id cid = {
4055 .gid = le64_to_cpu(watchers[i].name.num),
4056 .handle = cookie,
4059 dout("%s rbd_dev %p found cid %llu-%llu\n", __func__,
4060 rbd_dev, cid.gid, cid.handle);
4061 rbd_set_owner_cid(rbd_dev, &cid);
4062 ret = 1;
4063 goto out;
4067 dout("%s rbd_dev %p no watchers\n", __func__, rbd_dev);
4068 ret = 0;
4069 out:
4070 kfree(watchers);
4071 return ret;
4075 * lock_rwsem must be held for write
4077 static int rbd_try_lock(struct rbd_device *rbd_dev)
4079 struct ceph_client *client = rbd_dev->rbd_client->client;
4080 struct ceph_locker *lockers;
4084 for (;;) {
4085 ret = rbd_lock(rbd_dev);
4086 if (ret != -EBUSY)
4087 return ret;
4089 /* determine if the current lock holder is still alive */
4090 ret = get_lock_owner_info(rbd_dev, &lockers, &num_lockers);
4091 if (ret)
4092 return ret;
4094 if (num_lockers == 0)
4095 goto again;
4097 ret = find_watcher(rbd_dev, lockers);
4098 if (ret)
4099 goto out; /* request lock or error */
4101 rbd_warn(rbd_dev, "breaking header lock owned by %s%llu",
4102 ENTITY_NAME(lockers[0].id.name));
4104 ret = ceph_monc_blacklist_add(&client->monc,
4105 &lockers[0].info.addr);
4106 if (ret) {
4107 rbd_warn(rbd_dev, "blacklist of %s%llu failed: %d",
4108 ENTITY_NAME(lockers[0].id.name), ret);
4109 goto out;
4112 ret = ceph_cls_break_lock(&client->osdc, &rbd_dev->header_oid,
4113 &rbd_dev->header_oloc, RBD_LOCK_NAME,
4114 lockers[0].id.cookie,
4115 &lockers[0].id.name);
4116 if (ret && ret != -ENOENT)
4117 goto out;
4119 again:
4120 ceph_free_lockers(lockers, num_lockers);
4123 out:
4124 ceph_free_lockers(lockers, num_lockers);
4125 return ret;
4128 static int rbd_post_acquire_action(struct rbd_device *rbd_dev)
4132 if (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP) {
4133 ret = rbd_object_map_open(rbd_dev);
4134 if (ret)
4135 return ret;
4138 return 0;
4142 * Return:
4143 *   0 - lock acquired
4144 *   1 - caller should call rbd_request_lock()
4145 *  <0 - error
4147 static int rbd_try_acquire_lock(struct rbd_device *rbd_dev)
4151 down_read(&rbd_dev->lock_rwsem);
4152 dout("%s rbd_dev %p read lock_state %d\n", __func__, rbd_dev,
4153 rbd_dev->lock_state);
4154 if (__rbd_is_lock_owner(rbd_dev)) {
4155 up_read(&rbd_dev->lock_rwsem);
4156 return 0;
4159 up_read(&rbd_dev->lock_rwsem);
4160 down_write(&rbd_dev->lock_rwsem);
4161 dout("%s rbd_dev %p write lock_state %d\n", __func__, rbd_dev,
4162 rbd_dev->lock_state);
4163 if (__rbd_is_lock_owner(rbd_dev)) {
4164 up_write(&rbd_dev->lock_rwsem);
4165 return 0;
4168 ret = rbd_try_lock(rbd_dev);
4169 if (ret < 0) {
4170 rbd_warn(rbd_dev, "failed to lock header: %d", ret);
4171 if (ret == -EBLACKLISTED)
4172 goto out;
4174 ret = 1; /* request lock anyway */
4176 if (ret > 0) {
4177 up_write(&rbd_dev->lock_rwsem);
4178 return ret;
4181 rbd_assert(rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED);
4182 rbd_assert(list_empty(&rbd_dev->running_list));
4184 ret = rbd_post_acquire_action(rbd_dev);
4185 if (ret) {
4186 rbd_warn(rbd_dev, "post-acquire action failed: %d", ret);
4188 * Can't stay in RBD_LOCK_STATE_LOCKED because
4189 * rbd_lock_add_request() would let the request through,
4190 * assuming that e.g. object map is locked and loaded.
4192 rbd_unlock(rbd_dev);
4195 out:
4196 wake_lock_waiters(rbd_dev, ret);
4197 up_write(&rbd_dev->lock_rwsem);
4198 return ret;
4201 static void rbd_acquire_lock(struct work_struct *work)
4203 struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
4204 struct rbd_device, lock_dwork);
4205 int ret;
4207 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4208 again:
4209 ret = rbd_try_acquire_lock(rbd_dev);
4210 if (ret <= 0) {
4211 dout("%s rbd_dev %p ret %d - done\n", __func__, rbd_dev, ret);
4212 return;
4215 ret = rbd_request_lock(rbd_dev);
4216 if (ret == -ETIMEDOUT) {
4217 goto again; /* treat this as a dead client */
4218 } else if (ret == -EROFS) {
4219 rbd_warn(rbd_dev, "peer will not release lock");
4220 down_write(&rbd_dev->lock_rwsem);
4221 wake_lock_waiters(rbd_dev, ret);
4222 up_write(&rbd_dev->lock_rwsem);
4223 } else if (ret < 0) {
4224 rbd_warn(rbd_dev, "error requesting lock: %d", ret);
4225 mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork,
4226 RBD_RETRY_DELAY);
4227 } else {
4229 * lock owner acked, but resend if we don't see them
4230 * release the lock
4232 dout("%s rbd_dev %p requeueing lock_dwork\n", __func__,
4233 rbd_dev);
4234 mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork,
4235 msecs_to_jiffies(2 * RBD_NOTIFY_TIMEOUT * MSEC_PER_SEC));
4239 static bool rbd_quiesce_lock(struct rbd_device *rbd_dev)
4243 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4244 lockdep_assert_held_write(&rbd_dev->lock_rwsem);
4246 if (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED)
4247 return false;
4250 * Ensure that all in-flight IO is flushed.
4252 rbd_dev->lock_state = RBD_LOCK_STATE_RELEASING;
4253 rbd_assert(!completion_done(&rbd_dev->releasing_wait));
4254 need_wait = !list_empty(&rbd_dev->running_list);
4255 downgrade_write(&rbd_dev->lock_rwsem);
4256 if (need_wait)
4257 wait_for_completion(&rbd_dev->releasing_wait);
4258 up_read(&rbd_dev->lock_rwsem);
4260 down_write(&rbd_dev->lock_rwsem);
4261 if (rbd_dev->lock_state != RBD_LOCK_STATE_RELEASING)
4262 return false;
4264 rbd_assert(list_empty(&rbd_dev->running_list));
4265 return true;
4268 static void rbd_pre_release_action(struct rbd_device *rbd_dev)
4270 if (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP)
4271 rbd_object_map_close(rbd_dev);
4274 static void __rbd_release_lock(struct rbd_device *rbd_dev)
4276 rbd_assert(list_empty(&rbd_dev->running_list));
4278 rbd_pre_release_action(rbd_dev);
4279 rbd_unlock(rbd_dev);
4283 * lock_rwsem must be held for write
4285 static void rbd_release_lock(struct rbd_device *rbd_dev)
4287 if (!rbd_quiesce_lock(rbd_dev))
4288 return;
4290 __rbd_release_lock(rbd_dev);
4293 * Give others a chance to grab the lock - we would re-acquire
4294 * almost immediately if we got new IO while draining the running
4295 * list otherwise. We need to ack our own notifications, so this
4296 * lock_dwork will be requeued from rbd_handle_released_lock() by
4297 * way of maybe_kick_acquire().
4299 cancel_delayed_work(&rbd_dev->lock_dwork);
4302 static void rbd_release_lock_work(struct work_struct *work)
4304 struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
4305 unlock_work);
4307 down_write(&rbd_dev->lock_rwsem);
4308 rbd_release_lock(rbd_dev);
4309 up_write(&rbd_dev->lock_rwsem);
4312 static void maybe_kick_acquire(struct rbd_device *rbd_dev)
4314 bool have_requests;
4316 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4317 if (__rbd_is_lock_owner(rbd_dev))
4318 return;
4320 spin_lock(&rbd_dev->lock_lists_lock);
4321 have_requests = !list_empty(&rbd_dev->acquiring_list);
4322 spin_unlock(&rbd_dev->lock_lists_lock);
4323 if (have_requests || delayed_work_pending(&rbd_dev->lock_dwork)) {
4324 dout("%s rbd_dev %p kicking lock_dwork\n", __func__, rbd_dev);
4325 mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
4329 static void rbd_handle_acquired_lock(struct rbd_device *rbd_dev, u8 struct_v,
4330 void **p)
4332 struct rbd_client_id cid = { 0 };
4334 if (struct_v >= 2) {
4335 cid.gid = ceph_decode_64(p);
4336 cid.handle = ceph_decode_64(p);
4339 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
4340 cid.handle);
4341 if (!rbd_cid_equal(&cid, &rbd_empty_cid)) {
4342 down_write(&rbd_dev->lock_rwsem);
4343 if (rbd_cid_equal(&cid, &rbd_dev->owner_cid)) {
4345 * we already know that the remote client is
4346 * the owner
4348 up_write(&rbd_dev->lock_rwsem);
4349 return;
4352 rbd_set_owner_cid(rbd_dev, &cid);
4353 downgrade_write(&rbd_dev->lock_rwsem);
4354 } else {
4355 down_read(&rbd_dev->lock_rwsem);
4358 maybe_kick_acquire(rbd_dev);
4359 up_read(&rbd_dev->lock_rwsem);
4362 static void rbd_handle_released_lock(struct rbd_device *rbd_dev, u8 struct_v,
4363 void **p)
4365 struct rbd_client_id cid = { 0 };
4367 if (struct_v >= 2) {
4368 cid.gid = ceph_decode_64(p);
4369 cid.handle = ceph_decode_64(p);
4372 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
4373 cid.handle);
4374 if (!rbd_cid_equal(&cid, &rbd_empty_cid)) {
4375 down_write(&rbd_dev->lock_rwsem);
4376 if (!rbd_cid_equal(&cid, &rbd_dev->owner_cid)) {
4377 dout("%s rbd_dev %p unexpected owner, cid %llu-%llu != owner_cid %llu-%llu\n",
4378 __func__, rbd_dev, cid.gid, cid.handle,
4379 rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle);
4380 up_write(&rbd_dev->lock_rwsem);
4381 return;
4384 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
4385 downgrade_write(&rbd_dev->lock_rwsem);
4386 } else {
4387 down_read(&rbd_dev->lock_rwsem);
4390 maybe_kick_acquire(rbd_dev);
4391 up_read(&rbd_dev->lock_rwsem);
4395 * Returns result for ResponseMessage to be encoded (<= 0), or 1 if no
4396 * ResponseMessage is needed.
4398 static int rbd_handle_request_lock(struct rbd_device *rbd_dev, u8 struct_v,
4399 void **p)
4401 struct rbd_client_id my_cid = rbd_get_cid(rbd_dev);
4402 struct rbd_client_id cid = { 0 };
4403 int result = 1;
4405 if (struct_v >= 2) {
4406 cid.gid = ceph_decode_64(p);
4407 cid.handle = ceph_decode_64(p);
4410 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
4411 cid.handle);
4412 if (rbd_cid_equal(&cid, &my_cid))
4413 return result;
4415 down_read(&rbd_dev->lock_rwsem);
4416 if (__rbd_is_lock_owner(rbd_dev)) {
4417 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED &&
4418 rbd_cid_equal(&rbd_dev->owner_cid, &rbd_empty_cid))
4419 goto out_unlock;
4422 * encode ResponseMessage(0) so the peer can detect
4423 * a missing owner
4425 result = 0;
4427 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED) {
4428 if (!rbd_dev->opts->exclusive) {
4429 dout("%s rbd_dev %p queueing unlock_work\n",
4430 __func__, rbd_dev);
4431 queue_work(rbd_dev->task_wq,
4432 &rbd_dev->unlock_work);
4433 } else {
4434 /* refuse to release the lock */
4435 result = -EROFS;
4440 out_unlock:
4441 up_read(&rbd_dev->lock_rwsem);
4442 return result;
4445 static void __rbd_acknowledge_notify(struct rbd_device *rbd_dev,
4446 u64 notify_id, u64 cookie, s32 *result)
4448 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4449 char buf[4 + CEPH_ENCODING_START_BLK_LEN];
4450 int buf_size = sizeof(buf);
4451 int ret;
4453 if (result) {
4454 void *p = buf;
4456 /* encode ResponseMessage */
4457 ceph_start_encoding(&p, 1, 1,
4458 buf_size - CEPH_ENCODING_START_BLK_LEN);
4459 ceph_encode_32(&p, *result);
4460 } else {
4461 buf_size = 0;
4464 ret = ceph_osdc_notify_ack(osdc, &rbd_dev->header_oid,
4465 &rbd_dev->header_oloc, notify_id, cookie,
4466 buf, buf_size);
4467 if (ret)
4468 rbd_warn(rbd_dev, "acknowledge_notify failed: %d", ret);
4471 static void rbd_acknowledge_notify(struct rbd_device *rbd_dev, u64 notify_id,
4472 u64 cookie)
4474 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4475 __rbd_acknowledge_notify(rbd_dev, notify_id, cookie, NULL);
4478 static void rbd_acknowledge_notify_result(struct rbd_device *rbd_dev,
4479 u64 notify_id, u64 cookie, s32 result)
4481 dout("%s rbd_dev %p result %d\n", __func__, rbd_dev, result);
4482 __rbd_acknowledge_notify(rbd_dev, notify_id, cookie, &result);
4485 static void rbd_watch_cb(void *arg, u64 notify_id, u64 cookie,
4486 u64 notifier_id, void *data, size_t data_len)
4488 struct rbd_device *rbd_dev = arg;
4489 void *p = data;
4490 void *const end = p + data_len;
4491 u8 struct_v = 0;
4492 u32 len;
4493 u32 notify_op;
4494 int ret;
4496 dout("%s rbd_dev %p cookie %llu notify_id %llu data_len %zu\n",
4497 __func__, rbd_dev, cookie, notify_id, data_len);
4498 if (data_len) {
4499 ret = ceph_start_decoding(&p, end, 1, "NotifyMessage",
4500 &struct_v, &len);
4501 if (ret) {
4502 rbd_warn(rbd_dev, "failed to decode NotifyMessage: %d",
4503 ret);
4504 return;
4507 notify_op = ceph_decode_32(&p);
4508 } else {
4509 /* legacy notification for header updates */
4510 notify_op = RBD_NOTIFY_OP_HEADER_UPDATE;
4511 len = 0;
4514 dout("%s rbd_dev %p notify_op %u\n", __func__, rbd_dev, notify_op);
4515 switch (notify_op) {
4516 case RBD_NOTIFY_OP_ACQUIRED_LOCK:
4517 rbd_handle_acquired_lock(rbd_dev, struct_v, &p);
4518 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
4519 break;
4520 case RBD_NOTIFY_OP_RELEASED_LOCK:
4521 rbd_handle_released_lock(rbd_dev, struct_v, &p);
4522 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
4523 break;
4524 case RBD_NOTIFY_OP_REQUEST_LOCK:
4525 ret = rbd_handle_request_lock(rbd_dev, struct_v, &p);
4526 if (ret <= 0)
4527 rbd_acknowledge_notify_result(rbd_dev, notify_id,
4528 cookie, ret);
4529 else
4530 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
4531 break;
4532 case RBD_NOTIFY_OP_HEADER_UPDATE:
4533 ret = rbd_dev_refresh(rbd_dev);
4534 if (ret)
4535 rbd_warn(rbd_dev, "refresh failed: %d", ret);
4537 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
4538 break;
4539 default:
4540 if (rbd_is_lock_owner(rbd_dev))
4541 rbd_acknowledge_notify_result(rbd_dev, notify_id,
4542 cookie, -EOPNOTSUPP);
4543 else
4544 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
4545 break;
4549 static void __rbd_unregister_watch(struct rbd_device *rbd_dev);
4551 static void rbd_watch_errcb(void *arg, u64 cookie, int err)
4553 struct rbd_device *rbd_dev = arg;
4555 rbd_warn(rbd_dev, "encountered watch error: %d", err);
4557 down_write(&rbd_dev->lock_rwsem);
4558 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
4559 up_write(&rbd_dev->lock_rwsem);
4561 mutex_lock(&rbd_dev->watch_mutex);
4562 if (rbd_dev->watch_state == RBD_WATCH_STATE_REGISTERED) {
4563 __rbd_unregister_watch(rbd_dev);
4564 rbd_dev->watch_state = RBD_WATCH_STATE_ERROR;
4566 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->watch_dwork, 0);
4568 mutex_unlock(&rbd_dev->watch_mutex);
4572 * watch_mutex must be locked
4574 static int __rbd_register_watch(struct rbd_device *rbd_dev)
4576 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4577 struct ceph_osd_linger_request *handle;
4579 rbd_assert(!rbd_dev->watch_handle);
4580 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4582 handle = ceph_osdc_watch(osdc, &rbd_dev->header_oid,
4583 &rbd_dev->header_oloc, rbd_watch_cb,
4584 rbd_watch_errcb, rbd_dev);
4585 if (IS_ERR(handle))
4586 return PTR_ERR(handle);
4588 rbd_dev->watch_handle = handle;
4589 return 0;
4593 * watch_mutex must be locked
4595 static void __rbd_unregister_watch(struct rbd_device *rbd_dev)
4597 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4600 rbd_assert(rbd_dev->watch_handle);
4601 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4603 ret = ceph_osdc_unwatch(osdc, rbd_dev->watch_handle);
4604 if (ret)
4605 rbd_warn(rbd_dev, "failed to unwatch: %d", ret);
4607 rbd_dev->watch_handle = NULL;
4610 static int rbd_register_watch(struct rbd_device *rbd_dev)
4614 mutex_lock(&rbd_dev->watch_mutex);
4615 rbd_assert(rbd_dev->watch_state == RBD_WATCH_STATE_UNREGISTERED);
4616 ret = __rbd_register_watch(rbd_dev);
4617 if (ret)
4618 goto out;
4620 rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED;
4621 rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id;
4623 out:
4624 mutex_unlock(&rbd_dev->watch_mutex);
4625 return ret;
4628 static void cancel_tasks_sync(struct rbd_device *rbd_dev)
4630 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4632 cancel_work_sync(&rbd_dev->acquired_lock_work);
4633 cancel_work_sync(&rbd_dev->released_lock_work);
4634 cancel_delayed_work_sync(&rbd_dev->lock_dwork);
4635 cancel_work_sync(&rbd_dev->unlock_work);
4638 static void rbd_unregister_watch(struct rbd_device *rbd_dev)
4640 cancel_tasks_sync(rbd_dev);
4642 mutex_lock(&rbd_dev->watch_mutex);
4643 if (rbd_dev->watch_state == RBD_WATCH_STATE_REGISTERED)
4644 __rbd_unregister_watch(rbd_dev);
4645 rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED;
4646 mutex_unlock(&rbd_dev->watch_mutex);
4648 cancel_delayed_work_sync(&rbd_dev->watch_dwork);
4649 ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
4653 * lock_rwsem must be held for write
4655 static void rbd_reacquire_lock(struct rbd_device *rbd_dev)
4657 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4658 char cookie[32];
4659 int ret;
4661 if (!rbd_quiesce_lock(rbd_dev))
4662 return;
4664 format_lock_cookie(rbd_dev, cookie);
4665 ret = ceph_cls_set_cookie(osdc, &rbd_dev->header_oid,
4666 &rbd_dev->header_oloc, RBD_LOCK_NAME,
4667 CEPH_CLS_LOCK_EXCLUSIVE, rbd_dev->lock_cookie,
4668 RBD_LOCK_TAG, cookie);
4669 if (ret) {
4670 if (ret != -EOPNOTSUPP)
4671 rbd_warn(rbd_dev, "failed to update lock cookie: %d",
4672 ret);
4675 * Lock cookie cannot be updated on older OSDs, so do
4676 * a manual release and queue an acquire.
4678 __rbd_release_lock(rbd_dev);
4679 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
4680 } else {
4681 __rbd_lock(rbd_dev, cookie);
4682 wake_lock_waiters(rbd_dev, 0);
4686 static void rbd_reregister_watch(struct work_struct *work)
4688 struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
4689 struct rbd_device, watch_dwork);
4690 int ret;
4692 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4694 mutex_lock(&rbd_dev->watch_mutex);
4695 if (rbd_dev->watch_state != RBD_WATCH_STATE_ERROR) {
4696 mutex_unlock(&rbd_dev->watch_mutex);
4697 return;
4700 ret = __rbd_register_watch(rbd_dev);
4701 if (ret) {
4702 rbd_warn(rbd_dev, "failed to reregister watch: %d", ret);
4703 if (ret != -EBLACKLISTED && ret != -ENOENT) {
4704 queue_delayed_work(rbd_dev->task_wq,
4705 &rbd_dev->watch_dwork,
4706 RBD_RETRY_DELAY);
4707 mutex_unlock(&rbd_dev->watch_mutex);
4708 return;
4711 mutex_unlock(&rbd_dev->watch_mutex);
4712 down_write(&rbd_dev->lock_rwsem);
4713 wake_lock_waiters(rbd_dev, ret);
4714 up_write(&rbd_dev->lock_rwsem);
4715 return;
4718 rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED;
4719 rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id;
4720 mutex_unlock(&rbd_dev->watch_mutex);
4722 down_write(&rbd_dev->lock_rwsem);
4723 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED)
4724 rbd_reacquire_lock(rbd_dev);
4725 up_write(&rbd_dev->lock_rwsem);
4727 ret = rbd_dev_refresh(rbd_dev);
4728 if (ret)
4729 rbd_warn(rbd_dev, "reregistration refresh failed: %d", ret);
4733 * Synchronous osd object method call. Returns the number of bytes
4734 * returned in the inbound buffer, or a negative error code.
4736 static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
4737 struct ceph_object_id *oid,
4738 struct ceph_object_locator *oloc,
4739 const char *method_name,
4740 const void *outbound,
4741 size_t outbound_size,
4742 void *inbound,
4743 size_t inbound_size)
4745 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4746 struct page *req_page = NULL;
4747 struct page *reply_page;
4748 int ret;
4751 * Method calls are ultimately read operations. The result
4752 * should be placed into the inbound buffer provided. They
4753 * also supply outbound data--parameters for the object
4754 * method. Currently if this is present it will be a
4755 * snapshot id.
4757 if (outbound) {
4758 if (outbound_size > PAGE_SIZE)
4759 return -E2BIG;
4761 req_page = alloc_page(GFP_KERNEL);
4762 if (!req_page)
4763 return -ENOMEM;
4765 memcpy(page_address(req_page), outbound, outbound_size);
4768 reply_page = alloc_page(GFP_KERNEL);
4769 if (!reply_page) {
4770 if (req_page)
4771 __free_page(req_page);
4772 return -ENOMEM;
4775 ret = ceph_osdc_call(osdc, oid, oloc, RBD_DRV_NAME, method_name,
4776 CEPH_OSD_FLAG_READ, req_page, outbound_size,
4777 &reply_page, &inbound_size);
4778 if (!ret) {
4779 memcpy(inbound, page_address(reply_page), inbound_size);
4780 ret = inbound_size;
4783 if (req_page)
4784 __free_page(req_page);
4785 __free_page(reply_page);
4786 return ret;
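/*
 * Usage sketch (hedged, mirroring the v2 header helpers elsewhere in
 * this file): fetching the size of a snapshot via the "get_size" class
 * method looks roughly like
 *
 *	__le64 snapid = cpu_to_le64(snap_id);
 *	struct {
 *		__le64 order;
 *		__le64 size;
 *	} __attribute__ ((packed)) size_buf = { 0 };
 *
 *	ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
 *				  &rbd_dev->header_oloc, "get_size",
 *				  &snapid, sizeof(snapid),
 *				  &size_buf, sizeof(size_buf));
 */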
4789 static void rbd_queue_workfn(struct work_struct *work)
4791 struct request *rq = blk_mq_rq_from_pdu(work);
4792 struct rbd_device *rbd_dev = rq->q->queuedata;
4793 struct rbd_img_request *img_request;
4794 struct ceph_snap_context *snapc = NULL;
4795 u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT;
4796 u64 length = blk_rq_bytes(rq);
4797 enum obj_operation_type op_type;
4798 u64 mapping_size;
4799 int result;
4801 switch (req_op(rq)) {
4802 case REQ_OP_DISCARD:
4803 op_type = OBJ_OP_DISCARD;
4804 break;
4805 case REQ_OP_WRITE_ZEROES:
4806 op_type = OBJ_OP_ZEROOUT;
4807 break;
4808 case REQ_OP_WRITE:
4809 op_type = OBJ_OP_WRITE;
4810 break;
4811 case REQ_OP_READ:
4812 op_type = OBJ_OP_READ;
4813 break;
4814 default:
4815 dout("%s: non-fs request type %d\n", __func__, req_op(rq));
4816 result = -EIO;
4817 goto err;
4820 /* Ignore/skip any zero-length requests */
4822 if (!length) {
4823 dout("%s: zero-length request\n", __func__);
4824 result = 0;
4825 goto err_rq; /* Shouldn't happen */
4828 if (op_type != OBJ_OP_READ && rbd_dev->spec->snap_id != CEPH_NOSNAP) {
4829 rbd_warn(rbd_dev, "%s on read-only snapshot",
4830 obj_op_name(op_type));
4831 result = -EIO;
4832 goto err;
4836 * Quit early if the mapped snapshot no longer exists. It's
4837 * still possible the snapshot will have disappeared by the
4838 * time our request arrives at the osd, but there's no sense in
4839 * sending it if we already know.
4841 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
4842 dout("request for non-existent snapshot");
4843 rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
4844 result = -ENXIO;
4845 goto err_rq;
4848 if (offset && length > U64_MAX - offset + 1) {
4849 rbd_warn(rbd_dev, "bad request range (%llu~%llu)", offset,
4850 length);
4851 result = -EINVAL;
4852 goto err_rq; /* Shouldn't happen */
4855 blk_mq_start_request(rq);
4857 down_read(&rbd_dev->header_rwsem);
4858 mapping_size = rbd_dev->mapping.size;
4859 if (op_type != OBJ_OP_READ) {
4860 snapc = rbd_dev->header.snapc;
4861 ceph_get_snap_context(snapc);
4863 up_read(&rbd_dev->header_rwsem);
4865 if (offset + length > mapping_size) {
4866 rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)", offset,
4867 length, mapping_size);
4872 img_request = rbd_img_request_create(rbd_dev, op_type, snapc);
4877 img_request->rq = rq;
4878 snapc = NULL; /* img_request consumes a ref */
4880 if (op_type == OBJ_OP_DISCARD || op_type == OBJ_OP_ZEROOUT)
4881 result = rbd_img_fill_nodata(img_request, offset, length);
4883 result = rbd_img_fill_from_bio(img_request, offset, length,
4886 goto err_img_request;
4888 rbd_img_handle_request(img_request, 0);
4892 rbd_img_request_put(img_request);
4895 rbd_warn(rbd_dev, "%s %llx at %llx result %d",
4896 obj_op_name(op_type), length, offset, result);
4897 ceph_put_snap_context(snapc);
4899 blk_mq_end_request(rq, errno_to_blk_status(result));
4902 static blk_status_t rbd_queue_rq(struct blk_mq_hw_ctx *hctx,
4903 const struct blk_mq_queue_data *bd)
4905 struct request *rq = bd->rq;
4906 struct work_struct *work = blk_mq_rq_to_pdu(rq);
4908 queue_work(rbd_wq, work);
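/*
 * Illustrative note, not part of the original driver: because
 * tag_set.cmd_size is sizeof(struct work_struct) (see rbd_init_disk()),
 * every request carries its own work item, and the two pdu helpers
 * round-trip between them:
 *
 *	work = blk_mq_rq_to_pdu(rq);	request -> embedded work item
 *	rq = blk_mq_rq_from_pdu(work);	work item -> owning request
 *
 * rbd_queue_rq() queues that work item; rbd_queue_workfn() recovers the
 * request on the other side.
 */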
4912 static void rbd_free_disk(struct rbd_device *rbd_dev)
4914 blk_cleanup_queue(rbd_dev->disk->queue);
4915 blk_mq_free_tag_set(&rbd_dev->tag_set);
4916 put_disk(rbd_dev->disk);
4917 rbd_dev->disk = NULL;
4920 static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
4921 struct ceph_object_id *oid,
4922 struct ceph_object_locator *oloc,
4923 void *buf, int buf_len)
4926 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4927 struct ceph_osd_request *req;
4928 struct page **pages;
4929 int num_pages = calc_pages_for(0, buf_len);
4932 req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_KERNEL);
4936 ceph_oid_copy(&req->r_base_oid, oid);
4937 ceph_oloc_copy(&req->r_base_oloc, oloc);
4938 req->r_flags = CEPH_OSD_FLAG_READ;
4940 pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
4941 if (IS_ERR(pages)) {
4942 ret = PTR_ERR(pages);
4946 osd_req_op_extent_init(req, 0, CEPH_OSD_OP_READ, 0, buf_len, 0, 0);
4947 osd_req_op_extent_osd_data_pages(req, 0, pages, buf_len, 0, false,
4950 ret = ceph_osdc_alloc_messages(req, GFP_KERNEL);
4954 ceph_osdc_start_request(osdc, req, false);
4955 ret = ceph_osdc_wait_request(osdc, req);
4957 ceph_copy_from_page_vector(pages, buf, 0, ret);
4960 ceph_osdc_put_request(req);
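/*
 * Illustrative note, not part of the original driver: on success
 * ceph_osdc_wait_request() returns the number of bytes the OSD read,
 * which rbd_obj_read_sync() copies out of the page vector and returns.
 * Callers such as rbd_dev_v1_header_info() rely on this for their
 * short-read checks.
 */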
4965 * Read the complete header for the given rbd device. On successful
4966 * return, the rbd_dev->header field will contain up-to-date
4967 * information about the image.
4969 static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
4971 struct rbd_image_header_ondisk *ondisk = NULL;
4978 * The complete header will include an array of its 64-bit
4979 * snapshot ids, followed by the names of those snapshots as
4980 * a contiguous block of NUL-terminated strings. Note that
4981 * the number of snapshots could change by the time we read
4982 * it in, in which case we re-read it.
4989 size = sizeof (*ondisk);
4990 size += snap_count * sizeof (struct rbd_image_snap_ondisk);
4992 ondisk = kmalloc(size, GFP_KERNEL);
4996 ret = rbd_obj_read_sync(rbd_dev, &rbd_dev->header_oid,
4997 &rbd_dev->header_oloc, ondisk, size);
5000 if ((size_t)ret < size) {
5002 rbd_warn(rbd_dev, "short header read (want %zd got %d)",
5006 if (!rbd_dev_ondisk_valid(ondisk)) {
5008 rbd_warn(rbd_dev, "invalid header");
5012 names_size = le64_to_cpu(ondisk->snap_names_len);
5013 want_count = snap_count;
5014 snap_count = le32_to_cpu(ondisk->snap_count);
5015 } while (snap_count != want_count);
5017 ret = rbd_header_from_disk(rbd_dev, ondisk);
5025 * Clear the rbd device's EXISTS flag if the snapshot it's mapped to
5026 * has disappeared from the (just updated) snapshot context.
5028 static void rbd_exists_validate(struct rbd_device *rbd_dev)
5032 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags))
5035 snap_id = rbd_dev->spec->snap_id;
5036 if (snap_id == CEPH_NOSNAP)
5039 if (rbd_dev_snap_index(rbd_dev, snap_id) == BAD_SNAP_INDEX)
5040 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
5043 static void rbd_dev_update_size(struct rbd_device *rbd_dev)
5048 * If EXISTS is not set, rbd_dev->disk may be NULL, so don't
5049 * try to update its size. If REMOVING is set, updating size
5050 * is just useless work since the device can't be opened.
5052 if (test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags) &&
5053 !test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags)) {
5054 size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
5055 dout("setting size to %llu sectors", (unsigned long long)size);
5056 set_capacity(rbd_dev->disk, size);
5057 revalidate_disk(rbd_dev->disk);
5061 static int rbd_dev_refresh(struct rbd_device *rbd_dev)
5066 down_write(&rbd_dev->header_rwsem);
5067 mapping_size = rbd_dev->mapping.size;
5069 ret = rbd_dev_header_info(rbd_dev);
5074 * If there is a parent, see if it has disappeared due to the
5075 * mapped image getting flattened.
5077 if (rbd_dev->parent) {
5078 ret = rbd_dev_v2_parent_info(rbd_dev);
5083 if (rbd_dev->spec->snap_id == CEPH_NOSNAP) {
5084 rbd_dev->mapping.size = rbd_dev->header.image_size;
5086 /* validate mapped snapshot's EXISTS flag */
5087 rbd_exists_validate(rbd_dev);
5091 up_write(&rbd_dev->header_rwsem);
5092 if (!ret && mapping_size != rbd_dev->mapping.size)
5093 rbd_dev_update_size(rbd_dev);
5098 static int rbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
5099 unsigned int hctx_idx, unsigned int numa_node)
5101 struct work_struct *work = blk_mq_rq_to_pdu(rq);
5103 INIT_WORK(work, rbd_queue_workfn);
5107 static const struct blk_mq_ops rbd_mq_ops = {
5108 .queue_rq = rbd_queue_rq,
5109 .init_request = rbd_init_request,
5112 static int rbd_init_disk(struct rbd_device *rbd_dev)
5114 struct gendisk *disk;
5115 struct request_queue *q;
5116 unsigned int objset_bytes =
5117 rbd_dev->layout.object_size * rbd_dev->layout.stripe_count;
5120 /* create gendisk info */
5121 disk = alloc_disk(single_major ?
5122 (1 << RBD_SINGLE_MAJOR_PART_SHIFT) :
5123 RBD_MINORS_PER_MAJOR);
5127 snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
5129 disk->major = rbd_dev->major;
5130 disk->first_minor = rbd_dev->minor;
5132 disk->flags |= GENHD_FL_EXT_DEVT;
5133 disk->fops = &rbd_bd_ops;
5134 disk->private_data = rbd_dev;
5136 memset(&rbd_dev->tag_set, 0, sizeof(rbd_dev->tag_set));
5137 rbd_dev->tag_set.ops = &rbd_mq_ops;
5138 rbd_dev->tag_set.queue_depth = rbd_dev->opts->queue_depth;
5139 rbd_dev->tag_set.numa_node = NUMA_NO_NODE;
5140 rbd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
5141 rbd_dev->tag_set.nr_hw_queues = 1;
5142 rbd_dev->tag_set.cmd_size = sizeof(struct work_struct);
5144 err = blk_mq_alloc_tag_set(&rbd_dev->tag_set);
5148 q = blk_mq_init_queue(&rbd_dev->tag_set);
5154 blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
5155 /* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */
5157 blk_queue_max_hw_sectors(q, objset_bytes >> SECTOR_SHIFT);
5158 q->limits.max_sectors = queue_max_hw_sectors(q);
5159 blk_queue_max_segments(q, USHRT_MAX);
5160 blk_queue_max_segment_size(q, UINT_MAX);
5161 blk_queue_io_min(q, rbd_dev->opts->alloc_size);
5162 blk_queue_io_opt(q, rbd_dev->opts->alloc_size);
5164 if (rbd_dev->opts->trim) {
5165 blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
5166 q->limits.discard_granularity = rbd_dev->opts->alloc_size;
5167 blk_queue_max_discard_sectors(q, objset_bytes >> SECTOR_SHIFT);
5168 blk_queue_max_write_zeroes_sectors(q, objset_bytes >> SECTOR_SHIFT);
5171 if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
5172 q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
5175 * disk_release() expects a queue ref from add_disk() and will
5176 * put it. Hold an extra ref until add_disk() is called.
5178 WARN_ON(!blk_get_queue(q));
5180 q->queuedata = rbd_dev;
5182 rbd_dev->disk = disk;
5186 blk_mq_free_tag_set(&rbd_dev->tag_set);
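/*
 * Worked example, illustrative only, assuming the common defaults of
 * 4 MiB objects (object_size = 4194304) and stripe_count = 1:
 * objset_bytes is then 4 MiB, so the max_hw_sectors and
 * max_discard_sectors limits set above come out to
 * 4194304 >> SECTOR_SHIFT = 8192 512-byte sectors.
 */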
5196 static struct rbd_device *dev_to_rbd_dev(struct device *dev)
5198 return container_of(dev, struct rbd_device, dev);
5201 static ssize_t rbd_size_show(struct device *dev,
5202 struct device_attribute *attr, char *buf)
5204 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5206 return sprintf(buf, "%llu\n",
5207 (unsigned long long)rbd_dev->mapping.size);
5211 * Note this shows the features for whatever's mapped, which is not
5212 * necessarily the base image.
5214 static ssize_t rbd_features_show(struct device *dev,
5215 struct device_attribute *attr, char *buf)
5217 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5219 return sprintf(buf, "0x%016llx\n",
5220 (unsigned long long)rbd_dev->mapping.features);
5223 static ssize_t rbd_major_show(struct device *dev,
5224 struct device_attribute *attr, char *buf)
5226 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5229 return sprintf(buf, "%d\n", rbd_dev->major);
5231 return sprintf(buf, "(none)\n");
5234 static ssize_t rbd_minor_show(struct device *dev,
5235 struct device_attribute *attr, char *buf)
5237 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5239 return sprintf(buf, "%d\n", rbd_dev->minor);
5242 static ssize_t rbd_client_addr_show(struct device *dev,
5243 struct device_attribute *attr, char *buf)
5245 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5246 struct ceph_entity_addr *client_addr =
5247 ceph_client_addr(rbd_dev->rbd_client->client);
5249 return sprintf(buf, "%pISpc/%u\n", &client_addr->in_addr,
5250 le32_to_cpu(client_addr->nonce));
5253 static ssize_t rbd_client_id_show(struct device *dev,
5254 struct device_attribute *attr, char *buf)
5256 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5258 return sprintf(buf, "client%lld\n",
5259 ceph_client_gid(rbd_dev->rbd_client->client));
5262 static ssize_t rbd_cluster_fsid_show(struct device *dev,
5263 struct device_attribute *attr, char *buf)
5265 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5267 return sprintf(buf, "%pU\n", &rbd_dev->rbd_client->client->fsid);
5270 static ssize_t rbd_config_info_show(struct device *dev,
5271 struct device_attribute *attr, char *buf)
5273 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5275 return sprintf(buf, "%s\n", rbd_dev->config_info);
5278 static ssize_t rbd_pool_show(struct device *dev,
5279 struct device_attribute *attr, char *buf)
5281 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5283 return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
5286 static ssize_t rbd_pool_id_show(struct device *dev,
5287 struct device_attribute *attr, char *buf)
5289 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5291 return sprintf(buf, "%llu\n",
5292 (unsigned long long) rbd_dev->spec->pool_id);
5295 static ssize_t rbd_pool_ns_show(struct device *dev,
5296 struct device_attribute *attr, char *buf)
5298 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5300 return sprintf(buf, "%s\n", rbd_dev->spec->pool_ns ?: "");
5303 static ssize_t rbd_name_show(struct device *dev,
5304 struct device_attribute *attr, char *buf)
5306 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5308 if (rbd_dev->spec->image_name)
5309 return sprintf(buf, "%s\n", rbd_dev->spec->image_name);
5311 return sprintf(buf, "(unknown)\n");
5314 static ssize_t rbd_image_id_show(struct device *dev,
5315 struct device_attribute *attr, char *buf)
5317 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5319 return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
5323 * Shows the name of the currently-mapped snapshot (or
5324 * RBD_SNAP_HEAD_NAME for the base image).
5326 static ssize_t rbd_snap_show(struct device *dev,
5327 struct device_attribute *attr,
5330 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5332 return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
5335 static ssize_t rbd_snap_id_show(struct device *dev,
5336 struct device_attribute *attr, char *buf)
5338 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5340 return sprintf(buf, "%llu\n", rbd_dev->spec->snap_id);
5344 * For a v2 image, shows the chain of parent images, separated by empty
5345 * lines. For v1 images or if there is no parent, shows "(no parent image)".
5348 static ssize_t rbd_parent_show(struct device *dev,
5349 struct device_attribute *attr,
5352 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5355 if (!rbd_dev->parent)
5356 return sprintf(buf, "(no parent image)\n");
5358 for ( ; rbd_dev->parent; rbd_dev = rbd_dev->parent) {
5359 struct rbd_spec *spec = rbd_dev->parent_spec;
5361 count += sprintf(&buf[count], "%s"
5362 "pool_id %llu\npool_name %s\n"
5364 "image_id %s\nimage_name %s\n"
5365 "snap_id %llu\nsnap_name %s\n"
5367 !count ? "" : "\n", /* first? */
5368 spec->pool_id, spec->pool_name,
5369 spec->pool_ns ?: "",
5370 spec->image_id, spec->image_name ?: "(unknown)",
5371 spec->snap_id, spec->snap_name,
5372 rbd_dev->parent_overlap);
5378 static ssize_t rbd_image_refresh(struct device *dev,
5379 struct device_attribute *attr,
5383 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5386 ret = rbd_dev_refresh(rbd_dev);
5393 static DEVICE_ATTR(size, 0444, rbd_size_show, NULL);
5394 static DEVICE_ATTR(features, 0444, rbd_features_show, NULL);
5395 static DEVICE_ATTR(major, 0444, rbd_major_show, NULL);
5396 static DEVICE_ATTR(minor, 0444, rbd_minor_show, NULL);
5397 static DEVICE_ATTR(client_addr, 0444, rbd_client_addr_show, NULL);
5398 static DEVICE_ATTR(client_id, 0444, rbd_client_id_show, NULL);
5399 static DEVICE_ATTR(cluster_fsid, 0444, rbd_cluster_fsid_show, NULL);
5400 static DEVICE_ATTR(config_info, 0400, rbd_config_info_show, NULL);
5401 static DEVICE_ATTR(pool, 0444, rbd_pool_show, NULL);
5402 static DEVICE_ATTR(pool_id, 0444, rbd_pool_id_show, NULL);
5403 static DEVICE_ATTR(pool_ns, 0444, rbd_pool_ns_show, NULL);
5404 static DEVICE_ATTR(name, 0444, rbd_name_show, NULL);
5405 static DEVICE_ATTR(image_id, 0444, rbd_image_id_show, NULL);
5406 static DEVICE_ATTR(refresh, 0200, NULL, rbd_image_refresh);
5407 static DEVICE_ATTR(current_snap, 0444, rbd_snap_show, NULL);
5408 static DEVICE_ATTR(snap_id, 0444, rbd_snap_id_show, NULL);
5409 static DEVICE_ATTR(parent, 0444, rbd_parent_show, NULL);
5411 static struct attribute *rbd_attrs[] = {
5412 &dev_attr_size.attr,
5413 &dev_attr_features.attr,
5414 &dev_attr_major.attr,
5415 &dev_attr_minor.attr,
5416 &dev_attr_client_addr.attr,
5417 &dev_attr_client_id.attr,
5418 &dev_attr_cluster_fsid.attr,
5419 &dev_attr_config_info.attr,
5420 &dev_attr_pool.attr,
5421 &dev_attr_pool_id.attr,
5422 &dev_attr_pool_ns.attr,
5423 &dev_attr_name.attr,
5424 &dev_attr_image_id.attr,
5425 &dev_attr_current_snap.attr,
5426 &dev_attr_snap_id.attr,
5427 &dev_attr_parent.attr,
5428 &dev_attr_refresh.attr,
5432 static struct attribute_group rbd_attr_group = {
5436 static const struct attribute_group *rbd_attr_groups[] = {
5441 static void rbd_dev_release(struct device *dev);
5443 static const struct device_type rbd_device_type = {
5445 .groups = rbd_attr_groups,
5446 .release = rbd_dev_release,
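/*
 * Illustrative only: with the attribute group above, a mapped image
 * appears under sysfs roughly as
 *
 *	/sys/bus/rbd/devices/<dev_id>/size
 *	/sys/bus/rbd/devices/<dev_id>/pool
 *	/sys/bus/rbd/devices/<dev_id>/current_snap
 *	...
 *
 * so e.g. reading /sys/bus/rbd/devices/0/size invokes rbd_size_show().
 */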
5449 static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
5451 kref_get(&spec->kref);
5456 static void rbd_spec_free(struct kref *kref);
5457 static void rbd_spec_put(struct rbd_spec *spec)
5460 kref_put(&spec->kref, rbd_spec_free);
5463 static struct rbd_spec *rbd_spec_alloc(void)
5465 struct rbd_spec *spec;
5467 spec = kzalloc(sizeof (*spec), GFP_KERNEL);
5471 spec->pool_id = CEPH_NOPOOL;
5472 spec->snap_id = CEPH_NOSNAP;
5473 kref_init(&spec->kref);
5478 static void rbd_spec_free(struct kref *kref)
5480 struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);
5482 kfree(spec->pool_name);
5483 kfree(spec->pool_ns);
5484 kfree(spec->image_id);
5485 kfree(spec->image_name);
5486 kfree(spec->snap_name);
5490 static void rbd_dev_free(struct rbd_device *rbd_dev)
5492 WARN_ON(rbd_dev->watch_state != RBD_WATCH_STATE_UNREGISTERED);
5493 WARN_ON(rbd_dev->lock_state != RBD_LOCK_STATE_UNLOCKED);
5495 ceph_oid_destroy(&rbd_dev->header_oid);
5496 ceph_oloc_destroy(&rbd_dev->header_oloc);
5497 kfree(rbd_dev->config_info);
5499 rbd_put_client(rbd_dev->rbd_client);
5500 rbd_spec_put(rbd_dev->spec);
5501 kfree(rbd_dev->opts);
5505 static void rbd_dev_release(struct device *dev)
5507 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5508 bool need_put = !!rbd_dev->opts;
5511 destroy_workqueue(rbd_dev->task_wq);
5512 ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
5515 rbd_dev_free(rbd_dev);
5518 * This is racy, but way better than putting the module (module_put())
5519 * outside of the release callback. The race window is pretty small, so
5520 * doing something similar to dm (dm-builtin.c) is overkill.
5523 module_put(THIS_MODULE);
5526 static struct rbd_device *__rbd_dev_create(struct rbd_client *rbdc,
5527 struct rbd_spec *spec)
5529 struct rbd_device *rbd_dev;
5531 rbd_dev = kzalloc(sizeof(*rbd_dev), GFP_KERNEL);
5535 spin_lock_init(&rbd_dev->lock);
5536 INIT_LIST_HEAD(&rbd_dev->node);
5537 init_rwsem(&rbd_dev->header_rwsem);
5539 rbd_dev->header.data_pool_id = CEPH_NOPOOL;
5540 ceph_oid_init(&rbd_dev->header_oid);
5541 rbd_dev->header_oloc.pool = spec->pool_id;
5542 if (spec->pool_ns) {
5543 WARN_ON(!*spec->pool_ns);
5544 rbd_dev->header_oloc.pool_ns =
5545 ceph_find_or_create_string(spec->pool_ns,
5546 strlen(spec->pool_ns));
5549 mutex_init(&rbd_dev->watch_mutex);
5550 rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED;
5551 INIT_DELAYED_WORK(&rbd_dev->watch_dwork, rbd_reregister_watch);
5553 init_rwsem(&rbd_dev->lock_rwsem);
5554 rbd_dev->lock_state = RBD_LOCK_STATE_UNLOCKED;
5555 INIT_WORK(&rbd_dev->acquired_lock_work, rbd_notify_acquired_lock);
5556 INIT_WORK(&rbd_dev->released_lock_work, rbd_notify_released_lock);
5557 INIT_DELAYED_WORK(&rbd_dev->lock_dwork, rbd_acquire_lock);
5558 INIT_WORK(&rbd_dev->unlock_work, rbd_release_lock_work);
5559 spin_lock_init(&rbd_dev->lock_lists_lock);
5560 INIT_LIST_HEAD(&rbd_dev->acquiring_list);
5561 INIT_LIST_HEAD(&rbd_dev->running_list);
5562 init_completion(&rbd_dev->acquire_wait);
5563 init_completion(&rbd_dev->releasing_wait);
5565 spin_lock_init(&rbd_dev->object_map_lock);
5567 rbd_dev->dev.bus = &rbd_bus_type;
5568 rbd_dev->dev.type = &rbd_device_type;
5569 rbd_dev->dev.parent = &rbd_root_dev;
5570 device_initialize(&rbd_dev->dev);
5572 rbd_dev->rbd_client = rbdc;
5573 rbd_dev->spec = spec;
5579 * Create a mapping rbd_dev, i.e. one for an image being mapped (as opposed to a parent).
5581 static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
5582 struct rbd_spec *spec,
5583 struct rbd_options *opts)
5585 struct rbd_device *rbd_dev;
5587 rbd_dev = __rbd_dev_create(rbdc, spec);
5591 rbd_dev->opts = opts;
5593 /* get an id and fill in device name */
5594 rbd_dev->dev_id = ida_simple_get(&rbd_dev_id_ida, 0,
5595 minor_to_rbd_dev_id(1 << MINORBITS),
5597 if (rbd_dev->dev_id < 0)
5600 sprintf(rbd_dev->name, RBD_DRV_NAME "%d", rbd_dev->dev_id);
5601 rbd_dev->task_wq = alloc_ordered_workqueue("%s-tasks", WQ_MEM_RECLAIM,
5603 if (!rbd_dev->task_wq)
5606 /* we have a ref from do_rbd_add() */
5607 __module_get(THIS_MODULE);
5609 dout("%s rbd_dev %p dev_id %d\n", __func__, rbd_dev, rbd_dev->dev_id);
5613 ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
5615 rbd_dev_free(rbd_dev);
5619 static void rbd_dev_destroy(struct rbd_device *rbd_dev)
5622 put_device(&rbd_dev->dev);
5626 * Get the size and object order for an image snapshot, or if
5627 * snap_id is CEPH_NOSNAP, get this information for the base image.
5630 static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
5631 u8 *order, u64 *snap_size)
5633 __le64 snapid = cpu_to_le64(snap_id);
5638 } __attribute__ ((packed)) size_buf = { 0 };
5640 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5641 &rbd_dev->header_oloc, "get_size",
5642 &snapid, sizeof(snapid),
5643 &size_buf, sizeof(size_buf));
5644 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5647 if (ret < sizeof (size_buf))
5651 *order = size_buf.order;
5652 dout(" order %u", (unsigned int)*order);
5654 *snap_size = le64_to_cpu(size_buf.size);
5656 dout(" snap_id 0x%016llx snap_size = %llu\n",
5657 (unsigned long long)snap_id,
5658 (unsigned long long)*snap_size);
5663 static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
5665 return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
5666 &rbd_dev->header.obj_order,
5667 &rbd_dev->header.image_size);
5670 static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
5676 reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL);
5680 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5681 &rbd_dev->header_oloc, "get_object_prefix",
5682 NULL, 0, reply_buf, RBD_OBJ_PREFIX_LEN_MAX);
5683 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5688 rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
5689 p + ret, NULL, GFP_NOIO);
5692 if (IS_ERR(rbd_dev->header.object_prefix)) {
5693 ret = PTR_ERR(rbd_dev->header.object_prefix);
5694 rbd_dev->header.object_prefix = NULL;
5696 dout(" object_prefix = %s\n", rbd_dev->header.object_prefix);
5704 static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
5707 __le64 snapid = cpu_to_le64(snap_id);
5711 } __attribute__ ((packed)) features_buf = { 0 };
5715 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5716 &rbd_dev->header_oloc, "get_features",
5717 &snapid, sizeof(snapid),
5718 &features_buf, sizeof(features_buf));
5719 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5722 if (ret < sizeof (features_buf))
5725 unsup = le64_to_cpu(features_buf.incompat) & ~RBD_FEATURES_SUPPORTED;
5727 rbd_warn(rbd_dev, "image uses unsupported features: 0x%llx",
5732 *snap_features = le64_to_cpu(features_buf.features);
5734 dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
5735 (unsigned long long)snap_id,
5736 (unsigned long long)*snap_features,
5737 (unsigned long long)le64_to_cpu(features_buf.incompat));
5742 static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
5744 return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
5745 &rbd_dev->header.features);
5749 * These are generic image flags, but since they are used only for
5750 * object map, store them in rbd_dev->object_map_flags.
5752 * For the same reason, this function is called only on object map
5753 * (re)load and not on header refresh.
5755 static int rbd_dev_v2_get_flags(struct rbd_device *rbd_dev)
5757 __le64 snapid = cpu_to_le64(rbd_dev->spec->snap_id);
5761 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5762 &rbd_dev->header_oloc, "get_flags",
5763 &snapid, sizeof(snapid),
5764 &flags, sizeof(flags));
5767 if (ret < sizeof(flags))
5770 rbd_dev->object_map_flags = le64_to_cpu(flags);
5774 struct parent_image_info {
5776 const char *pool_ns;
5777 const char *image_id;
5785 * The caller is responsible for @pii.
5787 static int decode_parent_image_spec(void **p, void *end,
5788 struct parent_image_info *pii)
5794 ret = ceph_start_decoding(p, end, 1, "ParentImageSpec",
5795 &struct_v, &struct_len);
5799 ceph_decode_64_safe(p, end, pii->pool_id, e_inval);
5800 pii->pool_ns = ceph_extract_encoded_string(p, end, NULL, GFP_KERNEL);
5801 if (IS_ERR(pii->pool_ns)) {
5802 ret = PTR_ERR(pii->pool_ns);
5803 pii->pool_ns = NULL;
5806 pii->image_id = ceph_extract_encoded_string(p, end, NULL, GFP_KERNEL);
5807 if (IS_ERR(pii->image_id)) {
5808 ret = PTR_ERR(pii->image_id);
5809 pii->image_id = NULL;
5812 ceph_decode_64_safe(p, end, pii->snap_id, e_inval);
5819 static int __get_parent_info(struct rbd_device *rbd_dev,
5820 struct page *req_page,
5821 struct page *reply_page,
5822 struct parent_image_info *pii)
5824 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
5825 size_t reply_len = PAGE_SIZE;
5829 ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
5830 "rbd", "parent_get", CEPH_OSD_FLAG_READ,
5831 req_page, sizeof(u64), &reply_page, &reply_len);
5833 return ret == -EOPNOTSUPP ? 1 : ret;
5835 p = page_address(reply_page);
5836 end = p + reply_len;
5837 ret = decode_parent_image_spec(&p, end, pii);
5841 ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
5842 "rbd", "parent_overlap_get", CEPH_OSD_FLAG_READ,
5843 req_page, sizeof(u64), &reply_page, &reply_len);
5847 p = page_address(reply_page);
5848 end = p + reply_len;
5849 ceph_decode_8_safe(&p, end, pii->has_overlap, e_inval);
5850 if (pii->has_overlap)
5851 ceph_decode_64_safe(&p, end, pii->overlap, e_inval);
5860 * The caller is responsible for @pii.
5862 static int __get_parent_info_legacy(struct rbd_device *rbd_dev,
5863 struct page *req_page,
5864 struct page *reply_page,
5865 struct parent_image_info *pii)
5867 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
5868 size_t reply_len = PAGE_SIZE;
5872 ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
5873 "rbd", "get_parent", CEPH_OSD_FLAG_READ,
5874 req_page, sizeof(u64), &reply_page, &reply_len);
5878 p = page_address(reply_page);
5879 end = p + reply_len;
5880 ceph_decode_64_safe(&p, end, pii->pool_id, e_inval);
5881 pii->image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
5882 if (IS_ERR(pii->image_id)) {
5883 ret = PTR_ERR(pii->image_id);
5884 pii->image_id = NULL;
5887 ceph_decode_64_safe(&p, end, pii->snap_id, e_inval);
5888 pii->has_overlap = true;
5889 ceph_decode_64_safe(&p, end, pii->overlap, e_inval);
5897 static int get_parent_info(struct rbd_device *rbd_dev,
5898 struct parent_image_info *pii)
5900 struct page *req_page, *reply_page;
5904 req_page = alloc_page(GFP_KERNEL);
5908 reply_page = alloc_page(GFP_KERNEL);
5910 __free_page(req_page);
5914 p = page_address(req_page);
5915 ceph_encode_64(&p, rbd_dev->spec->snap_id);
5916 ret = __get_parent_info(rbd_dev, req_page, reply_page, pii);
5918 ret = __get_parent_info_legacy(rbd_dev, req_page, reply_page,
5921 __free_page(req_page);
5922 __free_page(reply_page);
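/*
 * Illustrative note, not part of the original driver: __get_parent_info()
 * maps -EOPNOTSUPP from the OSD (a cluster without the newer "parent_get"
 * class method) to a positive return value, which get_parent_info() above
 * uses as the cue to retry with the legacy "get_parent" method.
 */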
5926 static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
5928 struct rbd_spec *parent_spec;
5929 struct parent_image_info pii = { 0 };
5932 parent_spec = rbd_spec_alloc();
5936 ret = get_parent_info(rbd_dev, &pii);
5940 dout("%s pool_id %llu pool_ns %s image_id %s snap_id %llu has_overlap %d overlap %llu\n",
5941 __func__, pii.pool_id, pii.pool_ns, pii.image_id, pii.snap_id,
5942 pii.has_overlap, pii.overlap);
5944 if (pii.pool_id == CEPH_NOPOOL || !pii.has_overlap) {
5946 * Either the parent never existed, or we have
5947 * record of it but the image got flattened so it no
5948 * longer has a parent. When the parent of a
5949 * layered image disappears we immediately set the
5950 * overlap to 0. The effect of this is that all new
5951 * requests will be treated as if the image had no parent.
5954 * If !pii.has_overlap, the parent image spec is not
5955 * applicable. It's there to avoid duplication in each snapshot record.
5958 if (rbd_dev->parent_overlap) {
5959 rbd_dev->parent_overlap = 0;
5960 rbd_dev_parent_put(rbd_dev);
5961 pr_info("%s: clone image has been flattened\n",
5962 rbd_dev->disk->disk_name);
5965 goto out; /* No parent? No problem. */
5968 /* The ceph file layout needs to fit pool id in 32 bits */
5971 if (pii.pool_id > (u64)U32_MAX) {
5972 rbd_warn(NULL, "parent pool id too large (%llu > %u)",
5973 (unsigned long long)pii.pool_id, U32_MAX);
5978 * The parent won't change (except when the clone is
5979 * flattened, which is handled above). So we only need to
5980 * record the parent spec if we have not already done so.
5982 if (!rbd_dev->parent_spec) {
5983 parent_spec->pool_id = pii.pool_id;
5984 if (pii.pool_ns && *pii.pool_ns) {
5985 parent_spec->pool_ns = pii.pool_ns;
5988 parent_spec->image_id = pii.image_id;
5989 pii.image_id = NULL;
5990 parent_spec->snap_id = pii.snap_id;
5992 rbd_dev->parent_spec = parent_spec;
5993 parent_spec = NULL; /* rbd_dev now owns this */
5997 * We always update the parent overlap. If it's zero we issue
5998 * a warning, as we will proceed as if there was no parent.
6002 /* refresh, careful to warn just once */
6003 if (rbd_dev->parent_overlap)
6005 "clone now standalone (overlap became 0)");
6008 rbd_warn(rbd_dev, "clone is standalone (overlap 0)");
6011 rbd_dev->parent_overlap = pii.overlap;
6017 kfree(pii.image_id);
6018 rbd_spec_put(parent_spec);
6022 static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
6026 __le64 stripe_count;
6027 } __attribute__ ((packed)) striping_info_buf = { 0 };
6028 size_t size = sizeof (striping_info_buf);
6032 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
6033 &rbd_dev->header_oloc, "get_stripe_unit_count",
6034 NULL, 0, &striping_info_buf, size);
6035 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
6041 p = &striping_info_buf;
6042 rbd_dev->header.stripe_unit = ceph_decode_64(&p);
6043 rbd_dev->header.stripe_count = ceph_decode_64(&p);
6047 static int rbd_dev_v2_data_pool(struct rbd_device *rbd_dev)
6049 __le64 data_pool_id;
6052 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
6053 &rbd_dev->header_oloc, "get_data_pool",
6054 NULL, 0, &data_pool_id, sizeof(data_pool_id));
6057 if (ret < sizeof(data_pool_id))
6060 rbd_dev->header.data_pool_id = le64_to_cpu(data_pool_id);
6061 WARN_ON(rbd_dev->header.data_pool_id == CEPH_NOPOOL);
6065 static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
6067 CEPH_DEFINE_OID_ONSTACK(oid);
6068 size_t image_id_size;
6073 void *reply_buf = NULL;
6075 char *image_name = NULL;
6078 rbd_assert(!rbd_dev->spec->image_name);
6080 len = strlen(rbd_dev->spec->image_id);
6081 image_id_size = sizeof (__le32) + len;
6082 image_id = kmalloc(image_id_size, GFP_KERNEL);
6087 end = image_id + image_id_size;
6088 ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);
6090 size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
6091 reply_buf = kmalloc(size, GFP_KERNEL);
6095 ceph_oid_printf(&oid, "%s", RBD_DIRECTORY);
6096 ret = rbd_obj_method_sync(rbd_dev, &oid, &rbd_dev->header_oloc,
6097 "dir_get_name", image_id, image_id_size,
6102 end = reply_buf + ret;
6104 image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
6105 if (IS_ERR(image_name))
6108 dout("%s: name is %s len is %zd\n", __func__, image_name, len);
6116 static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
6118 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
6119 const char *snap_name;
6122 /* Skip over names until we find the one we are looking for */
6124 snap_name = rbd_dev->header.snap_names;
6125 while (which < snapc->num_snaps) {
6126 if (!strcmp(name, snap_name))
6127 return snapc->snaps[which];
6128 snap_name += strlen(snap_name) + 1;
6134 static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
6136 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
6141 for (which = 0; !found && which < snapc->num_snaps; which++) {
6142 const char *snap_name;
6144 snap_id = snapc->snaps[which];
6145 snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
6146 if (IS_ERR(snap_name)) {
6147 /* ignore no-longer existing snapshots */
6148 if (PTR_ERR(snap_name) == -ENOENT)
6153 found = !strcmp(name, snap_name);
6156 return found ? snap_id : CEPH_NOSNAP;
6160 * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if
6161 * no snapshot by that name is found, or if an error occurs.
6163 static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
6165 if (rbd_dev->image_format == 1)
6166 return rbd_v1_snap_id_by_name(rbd_dev, name);
6168 return rbd_v2_snap_id_by_name(rbd_dev, name);
6172 * An image being mapped will have everything but the snap id.
6174 static int rbd_spec_fill_snap_id(struct rbd_device *rbd_dev)
6176 struct rbd_spec *spec = rbd_dev->spec;
6178 rbd_assert(spec->pool_id != CEPH_NOPOOL && spec->pool_name);
6179 rbd_assert(spec->image_id && spec->image_name);
6180 rbd_assert(spec->snap_name);
6182 if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
6185 snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
6186 if (snap_id == CEPH_NOSNAP)
6189 spec->snap_id = snap_id;
6191 spec->snap_id = CEPH_NOSNAP;
6198 * A parent image will have all ids but none of the names.
6200 * All names in an rbd spec are dynamically allocated. It's OK if we
6201 * can't figure out the name for an image id.
6203 static int rbd_spec_fill_names(struct rbd_device *rbd_dev)
6205 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
6206 struct rbd_spec *spec = rbd_dev->spec;
6207 const char *pool_name;
6208 const char *image_name;
6209 const char *snap_name;
6212 rbd_assert(spec->pool_id != CEPH_NOPOOL);
6213 rbd_assert(spec->image_id);
6214 rbd_assert(spec->snap_id != CEPH_NOSNAP);
6216 /* Get the pool name; we have to make our own copy of this */
6218 pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
6220 rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
6223 pool_name = kstrdup(pool_name, GFP_KERNEL);
6227 /* Fetch the image name; tolerate failure here */
6229 image_name = rbd_dev_image_name(rbd_dev);
6231 rbd_warn(rbd_dev, "unable to get image name");
6233 /* Fetch the snapshot name */
6235 snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
6236 if (IS_ERR(snap_name)) {
6237 ret = PTR_ERR(snap_name);
6241 spec->pool_name = pool_name;
6242 spec->image_name = image_name;
6243 spec->snap_name = snap_name;
6253 static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
6262 struct ceph_snap_context *snapc;
6266 * We'll need room for the seq value (maximum snapshot id),
6267 * snapshot count, and array of that many snapshot ids.
6268 * For now we have a fixed upper limit on the number we're
6269 * prepared to receive.
6271 size = sizeof (__le64) + sizeof (__le32) +
6272 RBD_MAX_SNAP_COUNT * sizeof (__le64);
6273 reply_buf = kzalloc(size, GFP_KERNEL);
6277 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
6278 &rbd_dev->header_oloc, "get_snapcontext",
6279 NULL, 0, reply_buf, size);
6280 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
6285 end = reply_buf + ret;
6287 ceph_decode_64_safe(&p, end, seq, out);
6288 ceph_decode_32_safe(&p, end, snap_count, out);
6291 * Make sure the reported number of snapshot ids wouldn't go
6292 * beyond the end of our buffer. But before checking that,
6293 * make sure the computed size of the snapshot context we
6294 * allocate is representable in a size_t.
6296 if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
6301 if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
6305 snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
6311 for (i = 0; i < snap_count; i++)
6312 snapc->snaps[i] = ceph_decode_64(&p);
6314 ceph_put_snap_context(rbd_dev->header.snapc);
6315 rbd_dev->header.snapc = snapc;
6317 dout(" snap context seq = %llu, snap_count = %u\n",
6318 (unsigned long long)seq, (unsigned int)snap_count);
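/*
 * Worked example, illustrative only: the reply buffer sized above is
 * sizeof(__le64) + sizeof(__le32) + RBD_MAX_SNAP_COUNT * sizeof(__le64)
 * = 8 + 4 + 510 * 8 = 4092 bytes, so even a maximal snapshot context
 * still fits within a single 4 KiB page.
 */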
6325 static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
6336 size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
6337 reply_buf = kmalloc(size, GFP_KERNEL);
6339 return ERR_PTR(-ENOMEM);
6341 snapid = cpu_to_le64(snap_id);
6342 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
6343 &rbd_dev->header_oloc, "get_snapshot_name",
6344 &snapid, sizeof(snapid), reply_buf, size);
6345 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
6347 snap_name = ERR_PTR(ret);
6352 end = reply_buf + ret;
6353 snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
6354 if (IS_ERR(snap_name))
6357 dout(" snap_id 0x%016llx snap_name = %s\n",
6358 (unsigned long long)snap_id, snap_name);
6365 static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
6367 bool first_time = rbd_dev->header.object_prefix == NULL;
6370 ret = rbd_dev_v2_image_size(rbd_dev);
6375 ret = rbd_dev_v2_header_onetime(rbd_dev);
6380 ret = rbd_dev_v2_snap_context(rbd_dev);
6381 if (ret && first_time) {
6382 kfree(rbd_dev->header.object_prefix);
6383 rbd_dev->header.object_prefix = NULL;
6389 static int rbd_dev_header_info(struct rbd_device *rbd_dev)
6391 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
6393 if (rbd_dev->image_format == 1)
6394 return rbd_dev_v1_header_info(rbd_dev);
6396 return rbd_dev_v2_header_info(rbd_dev);
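/*
 * Illustrative note, not part of the original driver: for format 2
 * images the "onetime" fields (object prefix, features, striping and
 * data pool) are fetched only on the first rbd_dev_v2_header_info()
 * call; later refreshes re-read just the image size and the snapshot
 * context.
 */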
6400 * Skips over white space at *buf, and updates *buf to point to the
6401 * first found non-space character (if any). Returns the length of
6402 * the token (string of non-white space characters) found. Note
6403 * that *buf must be terminated with '\0'.
6405 static inline size_t next_token(const char **buf)
6408 * These are the characters that produce nonzero for
6409 * isspace() in the "C" and "POSIX" locales.
6411 const char *spaces = " \f\n\r\t\v";
6413 *buf += strspn(*buf, spaces); /* Find start of token */
6415 return strcspn(*buf, spaces); /* Return token length */
6419 * Finds the next token in *buf, dynamically allocates a buffer big
6420 * enough to hold a copy of it, and copies the token into the new
6421 * buffer. The copy is guaranteed to be terminated with '\0'. Note
6422 * that a duplicate buffer is created even for a zero-length token.
6424 * Returns a pointer to the newly-allocated duplicate, or a null
6425 * pointer if memory for the duplicate was not available. If
6426 * the lenp argument is a non-null pointer, the length of the token
6427 * (not including the '\0') is returned in *lenp.
6429 * If successful, the *buf pointer will be updated to point beyond
6430 * the end of the found token.
6432 * Note: uses GFP_KERNEL for allocation.
6434 static inline char *dup_token(const char **buf, size_t *lenp)
6439 len = next_token(buf);
6440 dup = kmemdup(*buf, len + 1, GFP_KERNEL);
6443 *(dup + len) = '\0';
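/*
 * Illustrative sketch only; this hypothetical helper is not part of the
 * driver. It shows how next_token()/dup_token() walk an add buffer
 * (the token values here are made up; error handling omitted):
 */
static void __maybe_unused rbd_example_tokenize(void)
{
	const char *buf = "1.2.3.4:6789 name=admin rbd foo";
	char *options, *pool;
	size_t len;

	len = next_token(&buf);			/* 12: "1.2.3.4:6789" */
	buf += len;				/* step past the monitor list */
	options = dup_token(&buf, NULL);	/* "name=admin" */
	pool = dup_token(&buf, NULL);		/* "rbd" */
	kfree(options);
	kfree(pool);
}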
6453 * Parse the options provided for an "rbd add" (i.e., rbd image
6454 * mapping) request. These arrive via a write to /sys/bus/rbd/add,
6455 * and the data written is passed here via a NUL-terminated buffer.
6456 * Returns 0 if successful or an error code otherwise.
6458 * The information extracted from these options is recorded in
6459 * the other parameters, which return dynamically-allocated
6460 * structures:
6461 *  ceph_opts
6462 * The address of a pointer that will refer to a ceph options
6463 * structure. Caller must release the returned pointer using
6464 * ceph_destroy_options() when it is no longer needed.
6465 *  opts
6466 * Address of an rbd options pointer. Fully initialized by
6467 * this function; caller must release with kfree().
6468 *  rbd_spec
6469 * Address of an rbd image specification pointer. Fully
6470 * initialized by this function based on parsed options.
6471 * Caller must release with rbd_spec_put().
6473 * The options passed take this form:
6474 * <mon_addrs> <options> <pool_name> <image_name> [<snap_name>]
6475 * where:
6476 *  <mon_addrs>
6477 * A comma-separated list of one or more monitor addresses.
6478 * A monitor address is an ip address, optionally followed
6479 * by a port number (separated by a colon).
6480 * I.e.: ip1[:port1][,ip2[:port2]...]
6481 *  <options>
6482 * A comma-separated list of ceph and/or rbd options.
6483 *  <pool_name>
6484 * The name of the rados pool containing the rbd image.
6485 *  <image_name>
6486 * The name of the image in that pool to map.
6487 *  <snap_name>
6488 * An optional snapshot name. If provided, the mapping will
6489 * present data from the image at the time that snapshot was
6490 * created. The image head is used if no snapshot name is
6491 * provided. Snapshot mappings are always read-only.
6493 static int rbd_add_parse_args(const char *buf,
6494 struct ceph_options **ceph_opts,
6495 struct rbd_options **opts,
6496 struct rbd_spec **rbd_spec)
6500 const char *mon_addrs;
6502 size_t mon_addrs_size;
6503 struct parse_rbd_opts_ctx pctx = { 0 };
6504 struct ceph_options *copts;
6507 /* The first four tokens are required */
6509 len = next_token(&buf);
6511 rbd_warn(NULL, "no monitor address(es) provided");
6515 mon_addrs_size = len + 1;
6519 options = dup_token(&buf, NULL);
6523 rbd_warn(NULL, "no options provided");
6527 pctx.spec = rbd_spec_alloc();
6531 pctx.spec->pool_name = dup_token(&buf, NULL);
6532 if (!pctx.spec->pool_name)
6534 if (!*pctx.spec->pool_name) {
6535 rbd_warn(NULL, "no pool name provided");
6539 pctx.spec->image_name = dup_token(&buf, NULL);
6540 if (!pctx.spec->image_name)
6542 if (!*pctx.spec->image_name) {
6543 rbd_warn(NULL, "no image name provided");
6548 * Snapshot name is optional; default is to use "-"
6549 * (indicating the head/no snapshot).
6551 len = next_token(&buf);
6553 buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
6554 len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
6555 } else if (len > RBD_MAX_SNAP_NAME_LEN) {
6556 ret = -ENAMETOOLONG;
6559 snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
6562 *(snap_name + len) = '\0';
6563 pctx.spec->snap_name = snap_name;
6565 /* Initialize all rbd options to the defaults */
6567 pctx.opts = kzalloc(sizeof(*pctx.opts), GFP_KERNEL);
6571 pctx.opts->read_only = RBD_READ_ONLY_DEFAULT;
6572 pctx.opts->queue_depth = RBD_QUEUE_DEPTH_DEFAULT;
6573 pctx.opts->alloc_size = RBD_ALLOC_SIZE_DEFAULT;
6574 pctx.opts->lock_timeout = RBD_LOCK_TIMEOUT_DEFAULT;
6575 pctx.opts->lock_on_read = RBD_LOCK_ON_READ_DEFAULT;
6576 pctx.opts->exclusive = RBD_EXCLUSIVE_DEFAULT;
6577 pctx.opts->trim = RBD_TRIM_DEFAULT;
6579 copts = ceph_parse_options(options, mon_addrs,
6580 mon_addrs + mon_addrs_size - 1,
6581 parse_rbd_opts_token, &pctx);
6582 if (IS_ERR(copts)) {
6583 ret = PTR_ERR(copts);
6590 *rbd_spec = pctx.spec;
6597 rbd_spec_put(pctx.spec);
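/*
 * Illustrative only, values hypothetical: a buffer handed to
 * rbd_add_parse_args() might look like
 *
 *	"1.2.3.4:6789 name=admin,queue_depth=128 rbd myimage mysnap"
 *
 * i.e. monitor list, ceph/rbd options, pool, image and an optional
 * snapshot name.
 */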
6603 static void rbd_dev_image_unlock(struct rbd_device *rbd_dev)
6605 down_write(&rbd_dev->lock_rwsem);
6606 if (__rbd_is_lock_owner(rbd_dev))
6607 __rbd_release_lock(rbd_dev);
6608 up_write(&rbd_dev->lock_rwsem);
6612 * If the wait is interrupted, an error is returned even if the lock
6613 * was successfully acquired. rbd_dev_image_unlock() will release it
6616 static int rbd_add_acquire_lock(struct rbd_device *rbd_dev)
6620 if (!(rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK)) {
6621 if (!rbd_dev->opts->exclusive && !rbd_dev->opts->lock_on_read)
6624 rbd_warn(rbd_dev, "exclusive-lock feature is not enabled");
6628 if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
6631 rbd_assert(!rbd_is_lock_owner(rbd_dev));
6632 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
6633 ret = wait_for_completion_killable_timeout(&rbd_dev->acquire_wait,
6634 ceph_timeout_jiffies(rbd_dev->opts->lock_timeout));
6636 ret = rbd_dev->acquire_err;
6641 rbd_warn(rbd_dev, "failed to acquire exclusive lock: %ld", ret);
6646 * The lock may have been released by now, unless automatic lock
6647 * transitions are disabled.
6649 rbd_assert(!rbd_dev->opts->exclusive || rbd_is_lock_owner(rbd_dev));
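/*
 * Illustrative note, not part of the original driver: the wait above is
 * both killable and bounded by the lock_timeout option; the result of
 * the acquisition attempt is posted in rbd_dev->acquire_err by the lock
 * worker queued on lock_dwork (rbd_acquire_lock()).
 */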
6654 * An rbd format 2 image has a unique identifier, distinct from the
6655 * name given to it by the user. Internally, that identifier is
6656 * what's used to specify the names of objects related to the image.
6658 * A special "rbd id" object is used to map an rbd image name to its
6659 * id. If that object doesn't exist, then there is no v2 rbd image
6660 * with the supplied name.
6662 * This function will record the given rbd_dev's image_id field if
6663 * it can be determined, and in that case will return 0. If any
6664 * errors occur a negative errno will be returned and the rbd_dev's
6665 * image_id field will be unchanged (and should be NULL).
6667 static int rbd_dev_image_id(struct rbd_device *rbd_dev)
6671 CEPH_DEFINE_OID_ONSTACK(oid);
6676 * When probing a parent image, the image id is already
6677 * known (and the image name likely is not). There's no
6678 * need to fetch the image id again in this case. We
6679 * do still need to set the image format though.
6681 if (rbd_dev->spec->image_id) {
6682 rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;
6688 * First, see if the format 2 image id file exists, and if
6689 * so, get the image's persistent id from it.
6691 ret = ceph_oid_aprintf(&oid, GFP_KERNEL, "%s%s", RBD_ID_PREFIX,
6692 rbd_dev->spec->image_name);
6696 dout("rbd id object name is %s\n", oid.name);
6698 /* Response will be an encoded string, which includes a length */
6700 size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
6701 response = kzalloc(size, GFP_NOIO);
6707 /* If it doesn't exist we'll assume it's a format 1 image */
6709 ret = rbd_obj_method_sync(rbd_dev, &oid, &rbd_dev->header_oloc,
6711 response, RBD_IMAGE_ID_LEN_MAX);
6712 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
6713 if (ret == -ENOENT) {
6714 image_id = kstrdup("", GFP_KERNEL);
6715 ret = image_id ? 0 : -ENOMEM;
6717 rbd_dev->image_format = 1;
6718 } else if (ret >= 0) {
6721 image_id = ceph_extract_encoded_string(&p, p + ret,
6723 ret = PTR_ERR_OR_ZERO(image_id);
6725 rbd_dev->image_format = 2;
6729 rbd_dev->spec->image_id = image_id;
6730 dout("image_id is %s\n", image_id);
6734 ceph_oid_destroy(&oid);
6739 * Undo whatever state changes are made by v1 or v2 header info calls.
6742 static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
6744 struct rbd_image_header *header;
6746 rbd_dev_parent_put(rbd_dev);
6747 rbd_object_map_free(rbd_dev);
6748 rbd_dev_mapping_clear(rbd_dev);
6750 /* Free dynamic fields from the header, then zero it out */
6752 header = &rbd_dev->header;
6753 ceph_put_snap_context(header->snapc);
6754 kfree(header->snap_sizes);
6755 kfree(header->snap_names);
6756 kfree(header->object_prefix);
6757 memset(header, 0, sizeof (*header));
6760 static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
6764 ret = rbd_dev_v2_object_prefix(rbd_dev);
6769 * Get and check the features for the image. Currently the
6770 * features are assumed to never change.
6772 ret = rbd_dev_v2_features(rbd_dev);
6776 /* If the image supports fancy striping, get its parameters */
6778 if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
6779 ret = rbd_dev_v2_striping_info(rbd_dev);
6784 if (rbd_dev->header.features & RBD_FEATURE_DATA_POOL) {
6785 ret = rbd_dev_v2_data_pool(rbd_dev);
6790 rbd_init_layout(rbd_dev);
6794 rbd_dev->header.features = 0;
6795 kfree(rbd_dev->header.object_prefix);
6796 rbd_dev->header.object_prefix = NULL;
6801 * @depth is rbd_dev_image_probe() -> rbd_dev_probe_parent() ->
6802 * rbd_dev_image_probe() recursion depth, which means it's also the
6803 * length of the already discovered part of the parent chain.
6805 static int rbd_dev_probe_parent(struct rbd_device *rbd_dev, int depth)
6807 struct rbd_device *parent = NULL;
6810 if (!rbd_dev->parent_spec)
6813 if (++depth > RBD_MAX_PARENT_CHAIN_LEN) {
6814 pr_info("parent chain is too long (%d)\n", depth);
6819 parent = __rbd_dev_create(rbd_dev->rbd_client, rbd_dev->parent_spec);
6826 * Images related by parent/child relationships always share
6827 * rbd_client and spec/parent_spec, so bump their refcounts.
6829 __rbd_get_client(rbd_dev->rbd_client);
6830 rbd_spec_get(rbd_dev->parent_spec);
6832 ret = rbd_dev_image_probe(parent, depth);
6836 rbd_dev->parent = parent;
6837 atomic_set(&rbd_dev->parent_ref, 1);
6841 rbd_dev_unparent(rbd_dev);
6842 rbd_dev_destroy(parent);
6846 static void rbd_dev_device_release(struct rbd_device *rbd_dev)
6848 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
6849 rbd_free_disk(rbd_dev);
6851 unregister_blkdev(rbd_dev->major, rbd_dev->name);
6855 * rbd_dev->header_rwsem must be locked for write and will be unlocked upon return.
6858 static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
6862 /* Record our major and minor device numbers. */
6864 if (!single_major) {
6865 ret = register_blkdev(0, rbd_dev->name);
6867 goto err_out_unlock;
6869 rbd_dev->major = ret;
6872 rbd_dev->major = rbd_major;
6873 rbd_dev->minor = rbd_dev_id_to_minor(rbd_dev->dev_id);
6876 /* Set up the blkdev mapping. */
6878 ret = rbd_init_disk(rbd_dev);
6880 goto err_out_blkdev;
6882 set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
6883 set_disk_ro(rbd_dev->disk, rbd_dev->opts->read_only);
6885 ret = dev_set_name(&rbd_dev->dev, "%d", rbd_dev->dev_id);
6889 set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
6890 up_write(&rbd_dev->header_rwsem);
6894 rbd_free_disk(rbd_dev);
6897 unregister_blkdev(rbd_dev->major, rbd_dev->name);
6899 up_write(&rbd_dev->header_rwsem);
6903 static int rbd_dev_header_name(struct rbd_device *rbd_dev)
6905 struct rbd_spec *spec = rbd_dev->spec;
6908 /* Record the header object name for this rbd image. */
6910 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
6911 if (rbd_dev->image_format == 1)
6912 ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
6913 spec->image_name, RBD_SUFFIX);
6915 ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
6916 RBD_HEADER_PREFIX, spec->image_id);
6921 static void rbd_dev_image_release(struct rbd_device *rbd_dev)
6923 rbd_dev_unprobe(rbd_dev);
6925 rbd_unregister_watch(rbd_dev);
6926 rbd_dev->image_format = 0;
6927 kfree(rbd_dev->spec->image_id);
6928 rbd_dev->spec->image_id = NULL;
6932 * Probe for the existence of the header object for the given rbd
6933 * device. If this image is the one being mapped (i.e., not a
6934 * parent), initiate a watch on its header object before using that
6935 * object to get detailed information about the rbd image.
6937 static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
6942 * Get the id from the image id object. Unless there's an
6943 * error, rbd_dev->spec->image_id will be filled in with
6944 * a dynamically-allocated string, and rbd_dev->image_format
6945 * will be set to either 1 or 2.
6947 ret = rbd_dev_image_id(rbd_dev);
6951 ret = rbd_dev_header_name(rbd_dev);
6953 goto err_out_format;
6956 ret = rbd_register_watch(rbd_dev);
6959 pr_info("image %s/%s%s%s does not exist\n",
6960 rbd_dev->spec->pool_name,
6961 rbd_dev->spec->pool_ns ?: "",
6962 rbd_dev->spec->pool_ns ? "/" : "",
6963 rbd_dev->spec->image_name);
6964 goto err_out_format;
6968 ret = rbd_dev_header_info(rbd_dev);
6973 * If this image is the one being mapped, we have pool name and
6974 * id, image name and id, and snap name - need to fill snap id.
6975 * Otherwise this is a parent image, identified by pool, image
6976 * and snap ids - need to fill in names for those ids.
6979 ret = rbd_spec_fill_snap_id(rbd_dev);
6981 ret = rbd_spec_fill_names(rbd_dev);
6984 pr_info("snap %s/%s%s%s@%s does not exist\n",
6985 rbd_dev->spec->pool_name,
6986 rbd_dev->spec->pool_ns ?: "",
6987 rbd_dev->spec->pool_ns ? "/" : "",
6988 rbd_dev->spec->image_name,
6989 rbd_dev->spec->snap_name);
6993 ret = rbd_dev_mapping_set(rbd_dev);
6997 if (rbd_dev->spec->snap_id != CEPH_NOSNAP &&
6998 (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP)) {
6999 ret = rbd_object_map_load(rbd_dev);
7004 if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
7005 ret = rbd_dev_v2_parent_info(rbd_dev);
7010 ret = rbd_dev_probe_parent(rbd_dev, depth);
7014 dout("discovered format %u image, header name is %s\n",
7015 rbd_dev->image_format, rbd_dev->header_oid.name);
7019 rbd_dev_unprobe(rbd_dev);
7022 rbd_unregister_watch(rbd_dev);
7024 rbd_dev->image_format = 0;
7025 kfree(rbd_dev->spec->image_id);
7026 rbd_dev->spec->image_id = NULL;
7030 static ssize_t do_rbd_add(struct bus_type *bus,
7034 struct rbd_device *rbd_dev = NULL;
7035 struct ceph_options *ceph_opts = NULL;
7036 struct rbd_options *rbd_opts = NULL;
7037 struct rbd_spec *spec = NULL;
7038 struct rbd_client *rbdc;
7041 if (!try_module_get(THIS_MODULE))
7044 /* parse add command */
7045 rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
7049 rbdc = rbd_get_client(ceph_opts);
7056 rc = ceph_pg_poolid_by_name(rbdc->client->osdc.osdmap, spec->pool_name);
7059 pr_info("pool %s does not exist\n", spec->pool_name);
7060 goto err_out_client;
7062 spec->pool_id = (u64)rc;
7064 rbd_dev = rbd_dev_create(rbdc, spec, rbd_opts);
7067 goto err_out_client;
7069 rbdc = NULL; /* rbd_dev now owns this */
7070 spec = NULL; /* rbd_dev now owns this */
7071 rbd_opts = NULL; /* rbd_dev now owns this */
7073 rbd_dev->config_info = kstrdup(buf, GFP_KERNEL);
7074 if (!rbd_dev->config_info) {
7076 goto err_out_rbd_dev;
7079 down_write(&rbd_dev->header_rwsem);
7080 rc = rbd_dev_image_probe(rbd_dev, 0);
7082 up_write(&rbd_dev->header_rwsem);
7083 goto err_out_rbd_dev;
7086 /* If we are mapping a snapshot it must be marked read-only */
7087 if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
7088 rbd_dev->opts->read_only = true;
7090 if (rbd_dev->opts->alloc_size > rbd_dev->layout.object_size) {
7091 rbd_warn(rbd_dev, "alloc_size adjusted to %u",
7092 rbd_dev->layout.object_size);
7093 rbd_dev->opts->alloc_size = rbd_dev->layout.object_size;
7096 rc = rbd_dev_device_setup(rbd_dev);
7098 goto err_out_image_probe;
7100 rc = rbd_add_acquire_lock(rbd_dev);
7102 goto err_out_image_lock;
7104 /* Everything's ready. Announce the disk to the world. */
7106 rc = device_add(&rbd_dev->dev);
7108 goto err_out_image_lock;
7110 add_disk(rbd_dev->disk);
7111 /* see rbd_init_disk() */
7112 blk_put_queue(rbd_dev->disk->queue);
7114 spin_lock(&rbd_dev_list_lock);
7115 list_add_tail(&rbd_dev->node, &rbd_dev_list);
7116 spin_unlock(&rbd_dev_list_lock);
7118 pr_info("%s: capacity %llu features 0x%llx\n", rbd_dev->disk->disk_name,
7119 (unsigned long long)get_capacity(rbd_dev->disk) << SECTOR_SHIFT,
7120 rbd_dev->header.features);
7123 module_put(THIS_MODULE);
7127 rbd_dev_image_unlock(rbd_dev);
7128 rbd_dev_device_release(rbd_dev);
7129 err_out_image_probe:
7130 rbd_dev_image_release(rbd_dev);
7132 rbd_dev_destroy(rbd_dev);
7134 rbd_put_client(rbdc);
7141 static ssize_t add_store(struct bus_type *bus, const char *buf, size_t count)
7146 return do_rbd_add(bus, buf, count);
7149 static ssize_t add_single_major_store(struct bus_type *bus, const char *buf,
7152 return do_rbd_add(bus, buf, count);
7155 static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
7157 while (rbd_dev->parent) {
7158 struct rbd_device *first = rbd_dev;
7159 struct rbd_device *second = first->parent;
7160 struct rbd_device *third;
7163 * Follow to the parent with no grandparent and remove it.
7166 while (second && (third = second->parent)) {
7171 rbd_dev_image_release(second);
7172 rbd_dev_destroy(second);
7173 first->parent = NULL;
7174 first->parent_overlap = 0;
7176 rbd_assert(first->parent_spec);
7177 rbd_spec_put(first->parent_spec);
7178 first->parent_spec = NULL;
7182 static ssize_t do_rbd_remove(struct bus_type *bus,
7186 struct rbd_device *rbd_dev = NULL;
7187 struct list_head *tmp;
7195 sscanf(buf, "%d %5s", &dev_id, opt_buf);
7197 pr_err("dev_id out of range\n");
7200 if (opt_buf[0] != '\0') {
7201 if (!strcmp(opt_buf, "force")) {
7204 pr_err("bad remove option at '%s'\n", opt_buf);
7210 spin_lock(&rbd_dev_list_lock);
7211 list_for_each(tmp, &rbd_dev_list) {
7212 rbd_dev = list_entry(tmp, struct rbd_device, node);
7213 if (rbd_dev->dev_id == dev_id) {
7219 spin_lock_irq(&rbd_dev->lock);
7220 if (rbd_dev->open_count && !force)
7222 else if (test_and_set_bit(RBD_DEV_FLAG_REMOVING,
7225 spin_unlock_irq(&rbd_dev->lock);
7227 spin_unlock(&rbd_dev_list_lock);
7233 * Prevent new IO from being queued and wait for existing
7234 * IO to complete/fail.
7236 blk_mq_freeze_queue(rbd_dev->disk->queue);
7237 blk_set_queue_dying(rbd_dev->disk->queue);
7240 del_gendisk(rbd_dev->disk);
7241 spin_lock(&rbd_dev_list_lock);
7242 list_del_init(&rbd_dev->node);
7243 spin_unlock(&rbd_dev_list_lock);
7244 device_del(&rbd_dev->dev);
7246 rbd_dev_image_unlock(rbd_dev);
7247 rbd_dev_device_release(rbd_dev);
7248 rbd_dev_image_release(rbd_dev);
7249 rbd_dev_destroy(rbd_dev);
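/*
 * Illustrative only: userspace requests removal by writing the device
 * id, optionally followed by "force", to the bus file, e.g.
 *
 *	echo "0" > /sys/bus/rbd/remove
 *	echo "0 force" > /sys/bus/rbd/remove
 *
 * (or remove_single_major when the driver runs in single-major mode).
 */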
7253 static ssize_t remove_store(struct bus_type *bus, const char *buf, size_t count)
7258 return do_rbd_remove(bus, buf, count);
7261 static ssize_t remove_single_major_store(struct bus_type *bus, const char *buf,
7264 return do_rbd_remove(bus, buf, count);
7268 * create control files in sysfs
7271 static int __init rbd_sysfs_init(void)
7275 ret = device_register(&rbd_root_dev);
7279 ret = bus_register(&rbd_bus_type);
7281 device_unregister(&rbd_root_dev);
7286 static void __exit rbd_sysfs_cleanup(void)
7288 bus_unregister(&rbd_bus_type);
7289 device_unregister(&rbd_root_dev);
7292 static int __init rbd_slab_init(void)
7294 rbd_assert(!rbd_img_request_cache);
7295 rbd_img_request_cache = KMEM_CACHE(rbd_img_request, 0);
7296 if (!rbd_img_request_cache)
7299 rbd_assert(!rbd_obj_request_cache);
7300 rbd_obj_request_cache = KMEM_CACHE(rbd_obj_request, 0);
7301 if (!rbd_obj_request_cache)
7307 kmem_cache_destroy(rbd_img_request_cache);
7308 rbd_img_request_cache = NULL;
7312 static void rbd_slab_exit(void)
7314 rbd_assert(rbd_obj_request_cache);
7315 kmem_cache_destroy(rbd_obj_request_cache);
7316 rbd_obj_request_cache = NULL;
7318 rbd_assert(rbd_img_request_cache);
7319 kmem_cache_destroy(rbd_img_request_cache);
7320 rbd_img_request_cache = NULL;
7323 static int __init rbd_init(void)
7327 if (!libceph_compatible(NULL)) {
7328 rbd_warn(NULL, "libceph incompatibility (quitting)");
7332 rc = rbd_slab_init();
7337 * The number of active work items is limited by the number of
7338 * rbd devices * queue depth, so leave @max_active at default.
7340 rbd_wq = alloc_workqueue(RBD_DRV_NAME, WQ_MEM_RECLAIM, 0);
7347 rbd_major = register_blkdev(0, RBD_DRV_NAME);
7348 if (rbd_major < 0) {
7354 rc = rbd_sysfs_init();
7356 goto err_out_blkdev;
7359 pr_info("loaded (major %d)\n", rbd_major);
7361 pr_info("loaded\n");
7367 unregister_blkdev(rbd_major, RBD_DRV_NAME);
7369 destroy_workqueue(rbd_wq);
7375 static void __exit rbd_exit(void)
7377 ida_destroy(&rbd_dev_id_ida);
7378 rbd_sysfs_cleanup();
7380 unregister_blkdev(rbd_major, RBD_DRV_NAME);
7381 destroy_workqueue(rbd_wq);
7385 module_init(rbd_init);
7386 module_exit(rbd_exit);
7388 MODULE_AUTHOR("Alex Elder <elder@inktank.com>");
7389 MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
7390 MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
7391 /* following authorship retained from original osdblk.c */
7392 MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");
7394 MODULE_DESCRIPTION("RADOS Block Device (RBD) driver");
7395 MODULE_LICENSE("GPL");