3 rbd.c -- Export ceph rados objects as a Linux block device
6 based on drivers/block/osdblk.c:
8 Copyright 2009 Red Hat, Inc.
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program; see the file COPYING. If not, write to
21 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 For usage instructions, please refer to:
27 Documentation/ABI/testing/sysfs-bus-rbd
31 #include <linux/ceph/libceph.h>
32 #include <linux/ceph/osd_client.h>
33 #include <linux/ceph/mon_client.h>
34 #include <linux/ceph/cls_lock_client.h>
35 #include <linux/ceph/striper.h>
36 #include <linux/ceph/decode.h>
37 #include <linux/parser.h>
38 #include <linux/bsearch.h>
40 #include <linux/kernel.h>
41 #include <linux/device.h>
42 #include <linux/module.h>
43 #include <linux/blk-mq.h>
45 #include <linux/blkdev.h>
46 #include <linux/slab.h>
47 #include <linux/idr.h>
48 #include <linux/workqueue.h>
50 #include "rbd_types.h"
52 #define RBD_DEBUG /* Activate rbd_assert() calls */
55 * Increment the given counter and return its updated value.
56 * If the counter is already 0 it will not be incremented.
 * If the counter is already at its maximum value, it returns
 * -EINVAL without updating it.
60 static int atomic_inc_return_safe(atomic_t *v)
64 counter = (unsigned int)__atomic_add_unless(v, 1, 0);
65 if (counter <= (unsigned int)INT_MAX)
73 /* Decrement the counter. Return the resulting value, or -EINVAL */
74 static int atomic_dec_return_safe(atomic_t *v)
78 counter = atomic_dec_return(v);
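/*
 * Usage sketch (illustrative only; the helpers below are hypothetical and
 * not part of the driver): the "safe" counter pair above is meant for
 * reference counts that must neither be revived from 0 nor overflow, as
 * is done for parent_ref further below.
 *
 *	static bool example_get_ref(atomic_t *refcount)
 *	{
 *		return atomic_inc_return_safe(refcount) > 0;
 *	}
 *
 *	static void example_put_ref(atomic_t *refcount)
 *	{
 *		if (atomic_dec_return_safe(refcount) < 0)
 *			pr_warn("refcount underflow\n");
 *	}
 *
 * example_get_ref() fails both when the count is already 0 (it is not
 * incremented) and when incrementing it would overflow (-EINVAL).
 */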
87 #define RBD_DRV_NAME "rbd"
89 #define RBD_MINORS_PER_MAJOR 256
90 #define RBD_SINGLE_MAJOR_PART_SHIFT 4
92 #define RBD_MAX_PARENT_CHAIN_LEN 16
94 #define RBD_SNAP_DEV_NAME_PREFIX "snap_"
95 #define RBD_MAX_SNAP_NAME_LEN \
96 (NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))
98 #define RBD_MAX_SNAP_COUNT 510 /* allows max snapc to fit in 4KB */
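/*
 * Sizing note (an assumption based on the on-wire snapshot context
 * encoding: an 8-byte seq, a 4-byte count and one 8-byte id per
 * snapshot): 8 + 4 + 510 * 8 = 4092 bytes still fits in a 4KB buffer,
 * whereas 511 snapshots (4100 bytes) would not.
 */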
100 #define RBD_SNAP_HEAD_NAME "-"
102 #define BAD_SNAP_INDEX U32_MAX /* invalid index into snap array */
/* This allows a single page to hold an image name sent by the OSD */
105 #define RBD_IMAGE_NAME_LEN_MAX (PAGE_SIZE - sizeof (__le32) - 1)
106 #define RBD_IMAGE_ID_LEN_MAX 64
108 #define RBD_OBJ_PREFIX_LEN_MAX 64
110 #define RBD_NOTIFY_TIMEOUT 5 /* seconds */
111 #define RBD_RETRY_DELAY msecs_to_jiffies(1000)
115 #define RBD_FEATURE_LAYERING (1ULL<<0)
116 #define RBD_FEATURE_STRIPINGV2 (1ULL<<1)
117 #define RBD_FEATURE_EXCLUSIVE_LOCK (1ULL<<2)
118 #define RBD_FEATURE_DATA_POOL (1ULL<<7)
119 #define RBD_FEATURE_OPERATIONS (1ULL<<8)
121 #define RBD_FEATURES_ALL (RBD_FEATURE_LAYERING | \
122 RBD_FEATURE_STRIPINGV2 | \
123 RBD_FEATURE_EXCLUSIVE_LOCK | \
124 RBD_FEATURE_DATA_POOL | \
125 RBD_FEATURE_OPERATIONS)
127 /* Features supported by this (client software) implementation. */
129 #define RBD_FEATURES_SUPPORTED (RBD_FEATURES_ALL)
132 * An RBD device name will be "rbd#", where the "rbd" comes from
133 * RBD_DRV_NAME above, and # is a unique integer identifier.
135 #define DEV_NAME_LEN 32
138 * block device image metadata (in-memory version)
140 struct rbd_image_header {
141 /* These six fields never change for a given rbd image */
147 u64 features; /* Might be changeable someday? */
149 /* The remaining fields need to be updated occasionally */
151 struct ceph_snap_context *snapc;
152 char *snap_names; /* format 1 only */
153 u64 *snap_sizes; /* format 1 only */
157 * An rbd image specification.
159 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
160 * identify an image. Each rbd_dev structure includes a pointer to
161 * an rbd_spec structure that encapsulates this identity.
163 * Each of the id's in an rbd_spec has an associated name. For a
164 * user-mapped image, the names are supplied and the id's associated
165 * with them are looked up. For a layered image, a parent image is
166 * defined by the tuple, and the names are looked up.
168 * An rbd_dev structure contains a parent_spec pointer which is
169 * non-null if the image it represents is a child in a layered
170 * image. This pointer will refer to the rbd_spec structure used
171 * by the parent rbd_dev for its own identity (i.e., the structure
172 * is shared between the parent and child).
174 * Since these structures are populated once, during the discovery
175 * phase of image construction, they are effectively immutable so
176 * we make no effort to synchronize access to them.
178 * Note that code herein does not assume the image name is known (it
179 * could be a null pointer).
183 const char *pool_name;
185 const char *image_id;
186 const char *image_name;
189 const char *snap_name;
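/*
 * Example (illustrative only): mapping image "foo" in pool "rbd" at the
 * head revision would yield a spec roughly like
 *
 *	{ .pool_name = "rbd", .image_name = "foo",
 *	  .snap_name = RBD_SNAP_HEAD_NAME, .snap_id = CEPH_NOSNAP }
 *
 * with pool_id and image_id filled in by lookups during image probe.
 */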
 * An instance of the client.  Multiple devices may share an rbd client.
198 struct ceph_client *client;
200 struct list_head node;
203 struct rbd_img_request;
205 enum obj_request_type {
206 OBJ_REQUEST_NODATA = 1,
207 OBJ_REQUEST_BIO, /* pointer into provided bio (list) */
208 OBJ_REQUEST_BVECS, /* pointer into provided bio_vec array */
209 OBJ_REQUEST_OWN_BVECS, /* private bio_vec array, doesn't own pages */
212 enum obj_operation_type {
 * Writes go through the following state machine to deal with
 * layering:
 *
 *                       need copyup
 * RBD_OBJ_WRITE_GUARD ---------------> RBD_OBJ_WRITE_COPYUP
 *        |     ^                              |
 *        v     \------------------------------/
 *  done * * * * * * * * * * * * * * * * * * * *
 *        ^
 *        |
 * RBD_OBJ_WRITE_FLAT
 *
 * Writes start in RBD_OBJ_WRITE_GUARD or _FLAT, depending on whether
 * there is a parent or not.
234 enum rbd_obj_write_state {
235 RBD_OBJ_WRITE_FLAT = 1,
237 RBD_OBJ_WRITE_COPYUP,
240 struct rbd_obj_request {
241 struct ceph_object_extent ex;
243 bool tried_parent; /* for reads */
244 enum rbd_obj_write_state write_state; /* for writes */
247 struct rbd_img_request *img_request;
248 struct ceph_file_extent *img_extents;
252 struct ceph_bio_iter bio_pos;
254 struct ceph_bvec_iter bvec_pos;
259 struct bio_vec *copyup_bvecs;
260 u32 copyup_bvec_count;
262 struct ceph_osd_request *osd_req;
264 u64 xferred; /* bytes transferred */
271 IMG_REQ_CHILD, /* initiator: block = 0, child image = 1 */
272 IMG_REQ_LAYERED, /* ENOENT handling: normal = 0, layered = 1 */
275 struct rbd_img_request {
276 struct rbd_device *rbd_dev;
277 enum obj_operation_type op_type;
278 enum obj_request_type data_type;
281 u64 snap_id; /* for reads */
282 struct ceph_snap_context *snapc; /* for writes */
285 struct request *rq; /* block request */
286 struct rbd_obj_request *obj_request; /* obj req initiator */
288 spinlock_t completion_lock;
289 u64 xferred;/* aggregate bytes transferred */
290 int result; /* first nonzero obj_request result */
292 struct list_head object_extents; /* obj_req.ex structs */
293 u32 obj_request_count;
299 #define for_each_obj_request(ireq, oreq) \
300 list_for_each_entry(oreq, &(ireq)->object_extents, ex.oe_item)
301 #define for_each_obj_request_safe(ireq, oreq, n) \
302 list_for_each_entry_safe(oreq, n, &(ireq)->object_extents, ex.oe_item)
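/*
 * Usage sketch (hypothetical helper, for illustration only): walk every
 * object request that makes up an image request.
 *
 *	static void example_dump_img_request(struct rbd_img_request *img_req)
 *	{
 *		struct rbd_obj_request *obj_req;
 *
 *		for_each_obj_request(img_req, obj_req)
 *			dout("objno %llu %llu~%llu\n", obj_req->ex.oe_objno,
 *			     obj_req->ex.oe_off, obj_req->ex.oe_len);
 *	}
 *
 * The _safe variant is needed when object requests may be deleted while
 * iterating, as in rbd_img_request_destroy().
 */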
304 enum rbd_watch_state {
305 RBD_WATCH_STATE_UNREGISTERED,
306 RBD_WATCH_STATE_REGISTERED,
307 RBD_WATCH_STATE_ERROR,
310 enum rbd_lock_state {
311 RBD_LOCK_STATE_UNLOCKED,
312 RBD_LOCK_STATE_LOCKED,
313 RBD_LOCK_STATE_RELEASING,
316 /* WatchNotify::ClientId */
317 struct rbd_client_id {
331 int dev_id; /* blkdev unique id */
333 int major; /* blkdev assigned major */
335 struct gendisk *disk; /* blkdev's gendisk and rq */
337 u32 image_format; /* Either 1 or 2 */
338 struct rbd_client *rbd_client;
340 char name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */
342 spinlock_t lock; /* queue, flags, open_count */
344 struct rbd_image_header header;
345 unsigned long flags; /* possibly lock protected */
346 struct rbd_spec *spec;
347 struct rbd_options *opts;
348 char *config_info; /* add{,_single_major} string */
350 struct ceph_object_id header_oid;
351 struct ceph_object_locator header_oloc;
353 struct ceph_file_layout layout; /* used for all rbd requests */
355 struct mutex watch_mutex;
356 enum rbd_watch_state watch_state;
357 struct ceph_osd_linger_request *watch_handle;
359 struct delayed_work watch_dwork;
361 struct rw_semaphore lock_rwsem;
362 enum rbd_lock_state lock_state;
363 char lock_cookie[32];
364 struct rbd_client_id owner_cid;
365 struct work_struct acquired_lock_work;
366 struct work_struct released_lock_work;
367 struct delayed_work lock_dwork;
368 struct work_struct unlock_work;
369 wait_queue_head_t lock_waitq;
371 struct workqueue_struct *task_wq;
373 struct rbd_spec *parent_spec;
376 struct rbd_device *parent;
378 /* Block layer tags. */
379 struct blk_mq_tag_set tag_set;
381 /* protects updating the header */
382 struct rw_semaphore header_rwsem;
384 struct rbd_mapping mapping;
386 struct list_head node;
390 unsigned long open_count; /* protected by lock */
 * Flag bits for rbd_dev->flags:
 * - REMOVING (which is coupled with rbd_dev->open_count) is protected
 *   by rbd_dev->lock
 * - BLACKLISTED is protected by rbd_dev->lock_rwsem
400 RBD_DEV_FLAG_EXISTS, /* mapped snapshot has not been deleted */
401 RBD_DEV_FLAG_REMOVING, /* this mapping is being removed */
402 RBD_DEV_FLAG_BLACKLISTED, /* our ceph_client is blacklisted */
405 static DEFINE_MUTEX(client_mutex); /* Serialize client creation */
407 static LIST_HEAD(rbd_dev_list); /* devices */
408 static DEFINE_SPINLOCK(rbd_dev_list_lock);
410 static LIST_HEAD(rbd_client_list); /* clients */
411 static DEFINE_SPINLOCK(rbd_client_list_lock);
413 /* Slab caches for frequently-allocated structures */
415 static struct kmem_cache *rbd_img_request_cache;
416 static struct kmem_cache *rbd_obj_request_cache;
418 static int rbd_major;
419 static DEFINE_IDA(rbd_dev_id_ida);
421 static struct workqueue_struct *rbd_wq;
424 * single-major requires >= 0.75 version of userspace rbd utility.
426 static bool single_major = true;
427 module_param(single_major, bool, S_IRUGO);
428 MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: true)");
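/*
 * With single_major set, all mapped devices share rbd_major and carve
 * their minors out of it (see rbd_dev_id_to_minor() below); when it is
 * clear, each device registers its own dynamically allocated major.
 */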
430 static ssize_t rbd_add(struct bus_type *bus, const char *buf,
432 static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
434 static ssize_t rbd_add_single_major(struct bus_type *bus, const char *buf,
436 static ssize_t rbd_remove_single_major(struct bus_type *bus, const char *buf,
438 static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth);
440 static int rbd_dev_id_to_minor(int dev_id)
442 return dev_id << RBD_SINGLE_MAJOR_PART_SHIFT;
445 static int minor_to_rbd_dev_id(int minor)
447 return minor >> RBD_SINGLE_MAJOR_PART_SHIFT;
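/*
 * Worked example (illustrative): with RBD_SINGLE_MAJOR_PART_SHIFT == 4,
 * each device owns 16 minors -- the whole disk plus 15 partitions.
 * dev_id 3 maps to minor 3 << 4 == 48, and any minor in 48..63 maps
 * back to dev_id 48 >> 4 == 3.
 */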
450 static bool __rbd_is_lock_owner(struct rbd_device *rbd_dev)
452 return rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED ||
453 rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING;
456 static bool rbd_is_lock_owner(struct rbd_device *rbd_dev)
460 down_read(&rbd_dev->lock_rwsem);
461 is_lock_owner = __rbd_is_lock_owner(rbd_dev);
462 up_read(&rbd_dev->lock_rwsem);
463 return is_lock_owner;
466 static ssize_t rbd_supported_features_show(struct bus_type *bus, char *buf)
468 return sprintf(buf, "0x%llx\n", RBD_FEATURES_SUPPORTED);
471 static BUS_ATTR(add, S_IWUSR, NULL, rbd_add);
472 static BUS_ATTR(remove, S_IWUSR, NULL, rbd_remove);
473 static BUS_ATTR(add_single_major, S_IWUSR, NULL, rbd_add_single_major);
474 static BUS_ATTR(remove_single_major, S_IWUSR, NULL, rbd_remove_single_major);
475 static BUS_ATTR(supported_features, S_IRUGO, rbd_supported_features_show, NULL);
477 static struct attribute *rbd_bus_attrs[] = {
479 &bus_attr_remove.attr,
480 &bus_attr_add_single_major.attr,
481 &bus_attr_remove_single_major.attr,
482 &bus_attr_supported_features.attr,
486 static umode_t rbd_bus_is_visible(struct kobject *kobj,
487 struct attribute *attr, int index)
490 (attr == &bus_attr_add_single_major.attr ||
491 attr == &bus_attr_remove_single_major.attr))
497 static const struct attribute_group rbd_bus_group = {
498 .attrs = rbd_bus_attrs,
499 .is_visible = rbd_bus_is_visible,
501 __ATTRIBUTE_GROUPS(rbd_bus);
503 static struct bus_type rbd_bus_type = {
505 .bus_groups = rbd_bus_groups,
508 static void rbd_root_dev_release(struct device *dev)
512 static struct device rbd_root_dev = {
514 .release = rbd_root_dev_release,
517 static __printf(2, 3)
518 void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
520 struct va_format vaf;
528 printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
529 else if (rbd_dev->disk)
530 printk(KERN_WARNING "%s: %s: %pV\n",
531 RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
532 else if (rbd_dev->spec && rbd_dev->spec->image_name)
533 printk(KERN_WARNING "%s: image %s: %pV\n",
534 RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
535 else if (rbd_dev->spec && rbd_dev->spec->image_id)
536 printk(KERN_WARNING "%s: id %s: %pV\n",
537 RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
539 printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
540 RBD_DRV_NAME, rbd_dev, &vaf);
545 #define rbd_assert(expr) \
546 if (unlikely(!(expr))) { \
			printk(KERN_ERR "\nAssertion failure in %s() "	\
						"at line %d:\n\n"	\
					"\trbd_assert(%s);\n\n",	\
					__func__, __LINE__, #expr);	\
			BUG();						\
		}
553 #else /* !RBD_DEBUG */
554 # define rbd_assert(expr) ((void) 0)
555 #endif /* !RBD_DEBUG */
557 static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);
559 static int rbd_dev_refresh(struct rbd_device *rbd_dev);
560 static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
561 static int rbd_dev_header_info(struct rbd_device *rbd_dev);
562 static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev);
563 static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
565 static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
566 u8 *order, u64 *snap_size);
567 static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
570 static int rbd_open(struct block_device *bdev, fmode_t mode)
572 struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
573 bool removing = false;
575 spin_lock_irq(&rbd_dev->lock);
576 if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
579 rbd_dev->open_count++;
580 spin_unlock_irq(&rbd_dev->lock);
584 (void) get_device(&rbd_dev->dev);
589 static void rbd_release(struct gendisk *disk, fmode_t mode)
591 struct rbd_device *rbd_dev = disk->private_data;
592 unsigned long open_count_before;
594 spin_lock_irq(&rbd_dev->lock);
595 open_count_before = rbd_dev->open_count--;
596 spin_unlock_irq(&rbd_dev->lock);
597 rbd_assert(open_count_before > 0);
599 put_device(&rbd_dev->dev);
602 static int rbd_ioctl_set_ro(struct rbd_device *rbd_dev, unsigned long arg)
606 if (get_user(ro, (int __user *)arg))
609 /* Snapshots can't be marked read-write */
610 if (rbd_dev->spec->snap_id != CEPH_NOSNAP && !ro)
613 /* Let blkdev_roset() handle it */
617 static int rbd_ioctl(struct block_device *bdev, fmode_t mode,
618 unsigned int cmd, unsigned long arg)
620 struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
625 ret = rbd_ioctl_set_ro(rbd_dev, arg);
635 static int rbd_compat_ioctl(struct block_device *bdev, fmode_t mode,
636 unsigned int cmd, unsigned long arg)
638 return rbd_ioctl(bdev, mode, cmd, arg);
640 #endif /* CONFIG_COMPAT */
642 static const struct block_device_operations rbd_bd_ops = {
643 .owner = THIS_MODULE,
645 .release = rbd_release,
648 .compat_ioctl = rbd_compat_ioctl,
653 * Initialize an rbd client instance. Success or not, this function
654 * consumes ceph_opts. Caller holds client_mutex.
656 static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
658 struct rbd_client *rbdc;
661 dout("%s:\n", __func__);
662 rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
666 kref_init(&rbdc->kref);
667 INIT_LIST_HEAD(&rbdc->node);
669 rbdc->client = ceph_create_client(ceph_opts, rbdc);
670 if (IS_ERR(rbdc->client))
672 ceph_opts = NULL; /* Now rbdc->client is responsible for ceph_opts */
674 ret = ceph_open_session(rbdc->client);
678 spin_lock(&rbd_client_list_lock);
679 list_add_tail(&rbdc->node, &rbd_client_list);
680 spin_unlock(&rbd_client_list_lock);
682 dout("%s: rbdc %p\n", __func__, rbdc);
686 ceph_destroy_client(rbdc->client);
691 ceph_destroy_options(ceph_opts);
692 dout("%s: error %d\n", __func__, ret);
697 static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
699 kref_get(&rbdc->kref);
705 * Find a ceph client with specific addr and configuration. If
706 * found, bump its reference count.
708 static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
710 struct rbd_client *client_node;
713 if (ceph_opts->flags & CEPH_OPT_NOSHARE)
716 spin_lock(&rbd_client_list_lock);
717 list_for_each_entry(client_node, &rbd_client_list, node) {
718 if (!ceph_compare_options(ceph_opts, client_node->client)) {
719 __rbd_get_client(client_node);
725 spin_unlock(&rbd_client_list_lock);
727 return found ? client_node : NULL;
731 * (Per device) rbd map options
739 /* string args above */
748 static match_table_t rbd_opts_tokens = {
749 {Opt_queue_depth, "queue_depth=%d"},
750 {Opt_lock_timeout, "lock_timeout=%d"},
752 /* string args above */
753 {Opt_read_only, "read_only"},
754 {Opt_read_only, "ro"}, /* Alternate spelling */
755 {Opt_read_write, "read_write"},
756 {Opt_read_write, "rw"}, /* Alternate spelling */
757 {Opt_lock_on_read, "lock_on_read"},
758 {Opt_exclusive, "exclusive"},
759 {Opt_notrim, "notrim"},
765 unsigned long lock_timeout;
772 #define RBD_QUEUE_DEPTH_DEFAULT BLKDEV_MAX_RQ
773 #define RBD_LOCK_TIMEOUT_DEFAULT 0 /* no timeout */
774 #define RBD_READ_ONLY_DEFAULT false
775 #define RBD_LOCK_ON_READ_DEFAULT false
776 #define RBD_EXCLUSIVE_DEFAULT false
777 #define RBD_TRIM_DEFAULT true
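/*
 * Example (illustrative, assuming the usual libceph option flow): a
 * per-device option string such as "queue_depth=128,lock_on_read,ro" is
 * split on commas by libceph's option parser, which hands any token it
 * does not recognize itself to parse_rbd_opts_token() below for matching
 * against rbd_opts_tokens.
 */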
779 static int parse_rbd_opts_token(char *c, void *private)
781 struct rbd_options *rbd_opts = private;
782 substring_t argstr[MAX_OPT_ARGS];
783 int token, intval, ret;
785 token = match_token(c, rbd_opts_tokens, argstr);
786 if (token < Opt_last_int) {
787 ret = match_int(&argstr[0], &intval);
789 pr_err("bad mount option arg (not int) at '%s'\n", c);
792 dout("got int token %d val %d\n", token, intval);
793 } else if (token > Opt_last_int && token < Opt_last_string) {
794 dout("got string token %d val %s\n", token, argstr[0].from);
796 dout("got token %d\n", token);
800 case Opt_queue_depth:
802 pr_err("queue_depth out of range\n");
805 rbd_opts->queue_depth = intval;
807 case Opt_lock_timeout:
808 /* 0 is "wait forever" (i.e. infinite timeout) */
809 if (intval < 0 || intval > INT_MAX / 1000) {
810 pr_err("lock_timeout out of range\n");
813 rbd_opts->lock_timeout = msecs_to_jiffies(intval * 1000);
816 rbd_opts->read_only = true;
819 rbd_opts->read_only = false;
821 case Opt_lock_on_read:
822 rbd_opts->lock_on_read = true;
825 rbd_opts->exclusive = true;
828 rbd_opts->trim = false;
831 /* libceph prints "bad option" msg */
static char *obj_op_name(enum obj_operation_type op_type)
853 * Destroy ceph client
855 * Caller must hold rbd_client_list_lock.
857 static void rbd_client_release(struct kref *kref)
859 struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);
861 dout("%s: rbdc %p\n", __func__, rbdc);
862 spin_lock(&rbd_client_list_lock);
863 list_del(&rbdc->node);
864 spin_unlock(&rbd_client_list_lock);
866 ceph_destroy_client(rbdc->client);
 * Drop reference to ceph client node. If it's not referenced anymore, release
 * it.
874 static void rbd_put_client(struct rbd_client *rbdc)
877 kref_put(&rbdc->kref, rbd_client_release);
880 static int wait_for_latest_osdmap(struct ceph_client *client)
885 ret = ceph_monc_get_version(&client->monc, "osdmap", &newest_epoch);
889 if (client->osdc.osdmap->epoch >= newest_epoch)
892 ceph_osdc_maybe_request_map(&client->osdc);
893 return ceph_monc_wait_osdmap(&client->monc, newest_epoch,
894 client->options->mount_timeout);
 * Get a ceph client with specific addr and configuration, if one does
 * not exist create it.  Either way, ceph_opts is consumed by this
 * function.
902 static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
904 struct rbd_client *rbdc;
907 mutex_lock_nested(&client_mutex, SINGLE_DEPTH_NESTING);
908 rbdc = rbd_client_find(ceph_opts);
910 ceph_destroy_options(ceph_opts);
913 * Using an existing client. Make sure ->pg_pools is up to
914 * date before we look up the pool id in do_rbd_add().
916 ret = wait_for_latest_osdmap(rbdc->client);
918 rbd_warn(NULL, "failed to get latest osdmap: %d", ret);
919 rbd_put_client(rbdc);
923 rbdc = rbd_client_create(ceph_opts);
925 mutex_unlock(&client_mutex);
930 static bool rbd_image_format_valid(u32 image_format)
932 return image_format == 1 || image_format == 2;
935 static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
940 /* The header has to start with the magic rbd header text */
941 if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
944 /* The bio layer requires at least sector-sized I/O */
946 if (ondisk->options.order < SECTOR_SHIFT)
949 /* If we use u64 in a few spots we may be able to loosen this */
951 if (ondisk->options.order > 8 * sizeof (int) - 1)
955 * The size of a snapshot header has to fit in a size_t, and
956 * that limits the number of snapshots.
958 snap_count = le32_to_cpu(ondisk->snap_count);
959 size = SIZE_MAX - sizeof (struct ceph_snap_context);
960 if (snap_count > size / sizeof (__le64))
	 * Not only that, but the size of the entire snapshot
	 * header must also be representable in a size_t.
967 size -= snap_count * sizeof (__le64);
968 if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
975 * returns the size of an object in the image
977 static u32 rbd_obj_bytes(struct rbd_image_header *header)
979 return 1U << header->obj_order;
982 static void rbd_init_layout(struct rbd_device *rbd_dev)
984 if (rbd_dev->header.stripe_unit == 0 ||
985 rbd_dev->header.stripe_count == 0) {
986 rbd_dev->header.stripe_unit = rbd_obj_bytes(&rbd_dev->header);
987 rbd_dev->header.stripe_count = 1;
990 rbd_dev->layout.stripe_unit = rbd_dev->header.stripe_unit;
991 rbd_dev->layout.stripe_count = rbd_dev->header.stripe_count;
992 rbd_dev->layout.object_size = rbd_obj_bytes(&rbd_dev->header);
993 rbd_dev->layout.pool_id = rbd_dev->header.data_pool_id == CEPH_NOPOOL ?
994 rbd_dev->spec->pool_id : rbd_dev->header.data_pool_id;
995 RCU_INIT_POINTER(rbd_dev->layout.pool_ns, NULL);
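/*
 * Example (illustrative): an image created with the default object order
 * of 22 ends up with object_size == stripe_unit == 4 MiB and
 * stripe_count == 1, i.e. the plain (non-"fancy") striping layout.
 */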
999 * Fill an rbd image header with information from the given format 1
1002 static int rbd_header_from_disk(struct rbd_device *rbd_dev,
1003 struct rbd_image_header_ondisk *ondisk)
1005 struct rbd_image_header *header = &rbd_dev->header;
1006 bool first_time = header->object_prefix == NULL;
1007 struct ceph_snap_context *snapc;
1008 char *object_prefix = NULL;
1009 char *snap_names = NULL;
1010 u64 *snap_sizes = NULL;
1015 /* Allocate this now to avoid having to handle failure below */
1018 object_prefix = kstrndup(ondisk->object_prefix,
1019 sizeof(ondisk->object_prefix),
1025 /* Allocate the snapshot context and fill it in */
1027 snap_count = le32_to_cpu(ondisk->snap_count);
1028 snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
1031 snapc->seq = le64_to_cpu(ondisk->snap_seq);
1033 struct rbd_image_snap_ondisk *snaps;
1034 u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);
1036 /* We'll keep a copy of the snapshot names... */
1038 if (snap_names_len > (u64)SIZE_MAX)
1040 snap_names = kmalloc(snap_names_len, GFP_KERNEL);
1044 /* ...as well as the array of their sizes. */
1045 snap_sizes = kmalloc_array(snap_count,
1046 sizeof(*header->snap_sizes),
		 * Copy the names, and fill in each snapshot's id
		 * and size.
		 *
		 * Note that rbd_dev_v1_header_info() guarantees the
		 * ondisk buffer we're working with has
		 * snap_names_len bytes beyond the end of the
		 * snapshot id array, so this memcpy() is safe.
1060 memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
1061 snaps = ondisk->snaps;
1062 for (i = 0; i < snap_count; i++) {
1063 snapc->snaps[i] = le64_to_cpu(snaps[i].id);
1064 snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
1068 /* We won't fail any more, fill in the header */
1071 header->object_prefix = object_prefix;
1072 header->obj_order = ondisk->options.order;
1073 rbd_init_layout(rbd_dev);
1075 ceph_put_snap_context(header->snapc);
1076 kfree(header->snap_names);
1077 kfree(header->snap_sizes);
1080 /* The remaining fields always get updated (when we refresh) */
1082 header->image_size = le64_to_cpu(ondisk->image_size);
1083 header->snapc = snapc;
1084 header->snap_names = snap_names;
1085 header->snap_sizes = snap_sizes;
1093 ceph_put_snap_context(snapc);
1094 kfree(object_prefix);
1099 static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
1101 const char *snap_name;
1103 rbd_assert(which < rbd_dev->header.snapc->num_snaps);
1105 /* Skip over names until we find the one we are looking for */
1107 snap_name = rbd_dev->header.snap_names;
1109 snap_name += strlen(snap_name) + 1;
1111 return kstrdup(snap_name, GFP_KERNEL);
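/*
 * Illustrative layout of header.snap_names for format 1 (names are stored
 * back to back, NUL-terminated, in the same order as snapc->snaps):
 *
 *	"mon-backup\0weekly\0initial\0"
 *
 * so fetching name N means skipping N preceding strings, as done above.
 */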
1115 * Snapshot id comparison function for use with qsort()/bsearch().
1116 * Note that result is for snapshots in *descending* order.
1118 static int snapid_compare_reverse(const void *s1, const void *s2)
1120 u64 snap_id1 = *(u64 *)s1;
1121 u64 snap_id2 = *(u64 *)s2;
1123 if (snap_id1 < snap_id2)
1125 return snap_id1 == snap_id2 ? 0 : -1;
 * Search a snapshot context to see if the given snapshot id is
 * present.
 *
 * Returns the position of the snapshot id in the array if it's found,
 * or BAD_SNAP_INDEX otherwise.
 *
 * Note: The snapshot array is kept sorted (by the osd) in
 * reverse order, highest snapshot id first.
1138 static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
1140 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
1143 found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
1144 sizeof (snap_id), snapid_compare_reverse);
1146 return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
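/*
 * Example (illustrative): with snapc->snaps == { 12, 7, 3 } (descending,
 * as kept by the osd), looking up id 7 yields index 1, while an id that
 * was never created yields BAD_SNAP_INDEX.
 */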
1149 static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
1153 const char *snap_name;
1155 which = rbd_dev_snap_index(rbd_dev, snap_id);
1156 if (which == BAD_SNAP_INDEX)
1157 return ERR_PTR(-ENOENT);
1159 snap_name = _rbd_dev_v1_snap_name(rbd_dev, which);
1160 return snap_name ? snap_name : ERR_PTR(-ENOMEM);
1163 static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
1165 if (snap_id == CEPH_NOSNAP)
1166 return RBD_SNAP_HEAD_NAME;
1168 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
1169 if (rbd_dev->image_format == 1)
1170 return rbd_dev_v1_snap_name(rbd_dev, snap_id);
1172 return rbd_dev_v2_snap_name(rbd_dev, snap_id);
1175 static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
1178 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
1179 if (snap_id == CEPH_NOSNAP) {
1180 *snap_size = rbd_dev->header.image_size;
1181 } else if (rbd_dev->image_format == 1) {
1184 which = rbd_dev_snap_index(rbd_dev, snap_id);
1185 if (which == BAD_SNAP_INDEX)
1188 *snap_size = rbd_dev->header.snap_sizes[which];
1193 ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
1202 static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
1205 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
1206 if (snap_id == CEPH_NOSNAP) {
1207 *snap_features = rbd_dev->header.features;
1208 } else if (rbd_dev->image_format == 1) {
1209 *snap_features = 0; /* No features for format 1 */
1214 ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, &features);
1218 *snap_features = features;
1223 static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
1225 u64 snap_id = rbd_dev->spec->snap_id;
1230 ret = rbd_snap_size(rbd_dev, snap_id, &size);
1233 ret = rbd_snap_features(rbd_dev, snap_id, &features);
1237 rbd_dev->mapping.size = size;
1238 rbd_dev->mapping.features = features;
1243 static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
1245 rbd_dev->mapping.size = 0;
1246 rbd_dev->mapping.features = 0;
1249 static void zero_bvec(struct bio_vec *bv)
1252 unsigned long flags;
1254 buf = bvec_kmap_irq(bv, &flags);
1255 memset(buf, 0, bv->bv_len);
1256 flush_dcache_page(bv->bv_page);
1257 bvec_kunmap_irq(buf, &flags);
1260 static void zero_bios(struct ceph_bio_iter *bio_pos, u32 off, u32 bytes)
1262 struct ceph_bio_iter it = *bio_pos;
1264 ceph_bio_iter_advance(&it, off);
1265 ceph_bio_iter_advance_step(&it, bytes, ({
1270 static void zero_bvecs(struct ceph_bvec_iter *bvec_pos, u32 off, u32 bytes)
1272 struct ceph_bvec_iter it = *bvec_pos;
1274 ceph_bvec_iter_advance(&it, off);
1275 ceph_bvec_iter_advance_step(&it, bytes, ({
1281 * Zero a range in @obj_req data buffer defined by a bio (list) or
1282 * (private) bio_vec array.
1284 * @off is relative to the start of the data buffer.
1286 static void rbd_obj_zero_range(struct rbd_obj_request *obj_req, u32 off,
1289 switch (obj_req->img_request->data_type) {
1290 case OBJ_REQUEST_BIO:
1291 zero_bios(&obj_req->bio_pos, off, bytes);
1293 case OBJ_REQUEST_BVECS:
1294 case OBJ_REQUEST_OWN_BVECS:
1295 zero_bvecs(&obj_req->bvec_pos, off, bytes);
1302 static void rbd_obj_request_destroy(struct kref *kref);
1303 static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
1305 rbd_assert(obj_request != NULL);
1306 dout("%s: obj %p (was %d)\n", __func__, obj_request,
1307 kref_read(&obj_request->kref));
1308 kref_put(&obj_request->kref, rbd_obj_request_destroy);
1311 static void rbd_img_request_get(struct rbd_img_request *img_request)
1313 dout("%s: img %p (was %d)\n", __func__, img_request,
1314 kref_read(&img_request->kref));
1315 kref_get(&img_request->kref);
1318 static void rbd_img_request_destroy(struct kref *kref);
1319 static void rbd_img_request_put(struct rbd_img_request *img_request)
1321 rbd_assert(img_request != NULL);
1322 dout("%s: img %p (was %d)\n", __func__, img_request,
1323 kref_read(&img_request->kref));
1324 kref_put(&img_request->kref, rbd_img_request_destroy);
1327 static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
1328 struct rbd_obj_request *obj_request)
1330 rbd_assert(obj_request->img_request == NULL);
1332 /* Image request now owns object's original reference */
1333 obj_request->img_request = img_request;
1334 img_request->obj_request_count++;
1335 img_request->pending_count++;
1336 dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
1339 static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
1340 struct rbd_obj_request *obj_request)
1342 dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
1343 list_del(&obj_request->ex.oe_item);
1344 rbd_assert(img_request->obj_request_count > 0);
1345 img_request->obj_request_count--;
1346 rbd_assert(obj_request->img_request == img_request);
1347 rbd_obj_request_put(obj_request);
1350 static void rbd_obj_request_submit(struct rbd_obj_request *obj_request)
1352 struct ceph_osd_request *osd_req = obj_request->osd_req;
1354 dout("%s %p object_no %016llx %llu~%llu osd_req %p\n", __func__,
1355 obj_request, obj_request->ex.oe_objno, obj_request->ex.oe_off,
1356 obj_request->ex.oe_len, osd_req);
1357 ceph_osdc_start_request(osd_req->r_osdc, osd_req, false);
 * The default/initial value for all image request flags is 0.  Each
 * is conditionally set to 1 at image request initialization time
 * and currently never changes thereafter.
1365 static void img_request_layered_set(struct rbd_img_request *img_request)
1367 set_bit(IMG_REQ_LAYERED, &img_request->flags);
1371 static void img_request_layered_clear(struct rbd_img_request *img_request)
1373 clear_bit(IMG_REQ_LAYERED, &img_request->flags);
1377 static bool img_request_layered_test(struct rbd_img_request *img_request)
1380 return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
1383 static bool rbd_obj_is_entire(struct rbd_obj_request *obj_req)
1385 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
1387 return !obj_req->ex.oe_off &&
1388 obj_req->ex.oe_len == rbd_dev->layout.object_size;
1391 static bool rbd_obj_is_tail(struct rbd_obj_request *obj_req)
1393 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
1395 return obj_req->ex.oe_off + obj_req->ex.oe_len ==
1396 rbd_dev->layout.object_size;
1399 static u64 rbd_obj_img_extents_bytes(struct rbd_obj_request *obj_req)
1401 return ceph_file_extents_bytes(obj_req->img_extents,
1402 obj_req->num_img_extents);
1405 static bool rbd_img_is_write(struct rbd_img_request *img_req)
1407 switch (img_req->op_type) {
1411 case OBJ_OP_DISCARD:
1418 static void rbd_obj_handle_request(struct rbd_obj_request *obj_req);
1420 static void rbd_osd_req_callback(struct ceph_osd_request *osd_req)
1422 struct rbd_obj_request *obj_req = osd_req->r_priv;
1424 dout("%s osd_req %p result %d for obj_req %p\n", __func__, osd_req,
1425 osd_req->r_result, obj_req);
1426 rbd_assert(osd_req == obj_req->osd_req);
1428 obj_req->result = osd_req->r_result < 0 ? osd_req->r_result : 0;
1429 if (!obj_req->result && !rbd_img_is_write(obj_req->img_request))
1430 obj_req->xferred = osd_req->r_result;
1433 * Writes aren't allowed to return a data payload. In some
1434 * guarded write cases (e.g. stat + zero on an empty object)
1435 * a stat response makes it through, but we don't care.
1437 obj_req->xferred = 0;
1439 rbd_obj_handle_request(obj_req);
1442 static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request)
1444 struct ceph_osd_request *osd_req = obj_request->osd_req;
1446 osd_req->r_flags = CEPH_OSD_FLAG_READ;
1447 osd_req->r_snapid = obj_request->img_request->snap_id;
1450 static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
1452 struct ceph_osd_request *osd_req = obj_request->osd_req;
1454 osd_req->r_flags = CEPH_OSD_FLAG_WRITE;
1455 ktime_get_real_ts(&osd_req->r_mtime);
1456 osd_req->r_data_offset = obj_request->ex.oe_off;
1459 static struct ceph_osd_request *
1460 rbd_osd_req_create(struct rbd_obj_request *obj_req, unsigned int num_ops)
1462 struct rbd_img_request *img_req = obj_req->img_request;
1463 struct rbd_device *rbd_dev = img_req->rbd_dev;
1464 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
1465 struct ceph_osd_request *req;
1466 const char *name_format = rbd_dev->image_format == 1 ?
1467 RBD_V1_DATA_FORMAT : RBD_V2_DATA_FORMAT;
1469 req = ceph_osdc_alloc_request(osdc,
1470 (rbd_img_is_write(img_req) ? img_req->snapc : NULL),
1471 num_ops, false, GFP_NOIO);
1475 req->r_callback = rbd_osd_req_callback;
1476 req->r_priv = obj_req;
1478 req->r_base_oloc.pool = rbd_dev->layout.pool_id;
1479 if (ceph_oid_aprintf(&req->r_base_oid, GFP_NOIO, name_format,
1480 rbd_dev->header.object_prefix, obj_req->ex.oe_objno))
1483 if (ceph_osdc_alloc_messages(req, GFP_NOIO))
1489 ceph_osdc_put_request(req);
1493 static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
1495 ceph_osdc_put_request(osd_req);
1498 static struct rbd_obj_request *rbd_obj_request_create(void)
1500 struct rbd_obj_request *obj_request;
1502 obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_NOIO);
1506 ceph_object_extent_init(&obj_request->ex);
1507 kref_init(&obj_request->kref);
1509 dout("%s %p\n", __func__, obj_request);
1513 static void rbd_obj_request_destroy(struct kref *kref)
1515 struct rbd_obj_request *obj_request;
1518 obj_request = container_of(kref, struct rbd_obj_request, kref);
1520 dout("%s: obj %p\n", __func__, obj_request);
1522 if (obj_request->osd_req)
1523 rbd_osd_req_destroy(obj_request->osd_req);
1525 switch (obj_request->img_request->data_type) {
1526 case OBJ_REQUEST_NODATA:
1527 case OBJ_REQUEST_BIO:
1528 case OBJ_REQUEST_BVECS:
1529 break; /* Nothing to do */
1530 case OBJ_REQUEST_OWN_BVECS:
1531 kfree(obj_request->bvec_pos.bvecs);
1537 kfree(obj_request->img_extents);
1538 if (obj_request->copyup_bvecs) {
1539 for (i = 0; i < obj_request->copyup_bvec_count; i++) {
1540 if (obj_request->copyup_bvecs[i].bv_page)
1541 __free_page(obj_request->copyup_bvecs[i].bv_page);
1543 kfree(obj_request->copyup_bvecs);
1546 kmem_cache_free(rbd_obj_request_cache, obj_request);
1549 /* It's OK to call this for a device with no parent */
1551 static void rbd_spec_put(struct rbd_spec *spec);
1552 static void rbd_dev_unparent(struct rbd_device *rbd_dev)
1554 rbd_dev_remove_parent(rbd_dev);
1555 rbd_spec_put(rbd_dev->parent_spec);
1556 rbd_dev->parent_spec = NULL;
1557 rbd_dev->parent_overlap = 0;
1561 * Parent image reference counting is used to determine when an
1562 * image's parent fields can be safely torn down--after there are no
1563 * more in-flight requests to the parent image. When the last
1564 * reference is dropped, cleaning them up is safe.
1566 static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
1570 if (!rbd_dev->parent_spec)
1573 counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
1577 /* Last reference; clean up parent data structures */
1580 rbd_dev_unparent(rbd_dev);
1582 rbd_warn(rbd_dev, "parent reference underflow");
 * If an image has a non-zero parent overlap, get a reference to its
 * parent.
 *
 * Returns true if the rbd device has a parent with a non-zero
 * overlap and a reference for it was successfully taken, or
 * false otherwise.
1593 static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
1597 if (!rbd_dev->parent_spec)
1600 down_read(&rbd_dev->header_rwsem);
1601 if (rbd_dev->parent_overlap)
1602 counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
1603 up_read(&rbd_dev->header_rwsem);
1606 rbd_warn(rbd_dev, "parent reference overflow");
1612 * Caller is responsible for filling in the list of object requests
1613 * that comprises the image request, and the Linux request pointer
1614 * (if there is one).
1616 static struct rbd_img_request *rbd_img_request_create(
1617 struct rbd_device *rbd_dev,
1618 enum obj_operation_type op_type,
1619 struct ceph_snap_context *snapc)
1621 struct rbd_img_request *img_request;
1623 img_request = kmem_cache_zalloc(rbd_img_request_cache, GFP_NOIO);
1627 img_request->rbd_dev = rbd_dev;
1628 img_request->op_type = op_type;
1629 if (!rbd_img_is_write(img_request))
1630 img_request->snap_id = rbd_dev->spec->snap_id;
1632 img_request->snapc = snapc;
1634 if (rbd_dev_parent_get(rbd_dev))
1635 img_request_layered_set(img_request);
1637 spin_lock_init(&img_request->completion_lock);
1638 INIT_LIST_HEAD(&img_request->object_extents);
1639 kref_init(&img_request->kref);
1641 dout("%s: rbd_dev %p %s -> img %p\n", __func__, rbd_dev,
1642 obj_op_name(op_type), img_request);
1646 static void rbd_img_request_destroy(struct kref *kref)
1648 struct rbd_img_request *img_request;
1649 struct rbd_obj_request *obj_request;
1650 struct rbd_obj_request *next_obj_request;
1652 img_request = container_of(kref, struct rbd_img_request, kref);
1654 dout("%s: img %p\n", __func__, img_request);
1656 for_each_obj_request_safe(img_request, obj_request, next_obj_request)
1657 rbd_img_obj_request_del(img_request, obj_request);
1658 rbd_assert(img_request->obj_request_count == 0);
1660 if (img_request_layered_test(img_request)) {
1661 img_request_layered_clear(img_request);
1662 rbd_dev_parent_put(img_request->rbd_dev);
1665 if (rbd_img_is_write(img_request))
1666 ceph_put_snap_context(img_request->snapc);
1668 kmem_cache_free(rbd_img_request_cache, img_request);
1671 static void prune_extents(struct ceph_file_extent *img_extents,
1672 u32 *num_img_extents, u64 overlap)
1674 u32 cnt = *num_img_extents;
1676 /* drop extents completely beyond the overlap */
1677 while (cnt && img_extents[cnt - 1].fe_off >= overlap)
1681 struct ceph_file_extent *ex = &img_extents[cnt - 1];
1683 /* trim final overlapping extent */
1684 if (ex->fe_off + ex->fe_len > overlap)
1685 ex->fe_len = overlap - ex->fe_off;
1688 *num_img_extents = cnt;
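/*
 * Worked example (illustrative): with an overlap of 4096 bytes, the
 * extents { {0, 2048}, {3072, 2048}, {8192, 1024} } become
 * { {0, 2048}, {3072, 1024} } -- the last extent lies entirely beyond
 * the overlap and is dropped, and the second is trimmed at the overlap
 * point.
 */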
1692 * Determine the byte range(s) covered by either just the object extent
1693 * or the entire object in the parent image.
1695 static int rbd_obj_calc_img_extents(struct rbd_obj_request *obj_req,
1698 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
1701 if (!rbd_dev->parent_overlap)
1704 ret = ceph_extent_to_file(&rbd_dev->layout, obj_req->ex.oe_objno,
1705 entire ? 0 : obj_req->ex.oe_off,
1706 entire ? rbd_dev->layout.object_size :
1708 &obj_req->img_extents,
1709 &obj_req->num_img_extents);
1713 prune_extents(obj_req->img_extents, &obj_req->num_img_extents,
1714 rbd_dev->parent_overlap);
1718 static void rbd_osd_req_setup_data(struct rbd_obj_request *obj_req, u32 which)
1720 switch (obj_req->img_request->data_type) {
1721 case OBJ_REQUEST_BIO:
1722 osd_req_op_extent_osd_data_bio(obj_req->osd_req, which,
1724 obj_req->ex.oe_len);
1726 case OBJ_REQUEST_BVECS:
1727 case OBJ_REQUEST_OWN_BVECS:
1728 rbd_assert(obj_req->bvec_pos.iter.bi_size ==
1729 obj_req->ex.oe_len);
1730 rbd_assert(obj_req->bvec_idx == obj_req->bvec_count);
1731 osd_req_op_extent_osd_data_bvec_pos(obj_req->osd_req, which,
1732 &obj_req->bvec_pos);
1739 static int rbd_obj_setup_read(struct rbd_obj_request *obj_req)
1741 obj_req->osd_req = rbd_osd_req_create(obj_req, 1);
1742 if (!obj_req->osd_req)
1745 osd_req_op_extent_init(obj_req->osd_req, 0, CEPH_OSD_OP_READ,
1746 obj_req->ex.oe_off, obj_req->ex.oe_len, 0, 0);
1747 rbd_osd_req_setup_data(obj_req, 0);
1749 rbd_osd_req_format_read(obj_req);
1753 static int __rbd_obj_setup_stat(struct rbd_obj_request *obj_req,
1756 struct page **pages;
	 * The response data for a STAT call consists of:
	 *     le64 length;
	 *     struct { le32 tv_sec; le32 tv_nsec; } mtime;
1766 pages = ceph_alloc_page_vector(1, GFP_NOIO);
1768 return PTR_ERR(pages);
1770 osd_req_op_init(obj_req->osd_req, which, CEPH_OSD_OP_STAT, 0);
1771 osd_req_op_raw_data_in_pages(obj_req->osd_req, which, pages,
1772 8 + sizeof(struct ceph_timespec),
1777 static void __rbd_obj_setup_write(struct rbd_obj_request *obj_req,
1780 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
1783 osd_req_op_alloc_hint_init(obj_req->osd_req, which++,
1784 rbd_dev->layout.object_size,
1785 rbd_dev->layout.object_size);
1787 if (rbd_obj_is_entire(obj_req))
1788 opcode = CEPH_OSD_OP_WRITEFULL;
1790 opcode = CEPH_OSD_OP_WRITE;
1792 osd_req_op_extent_init(obj_req->osd_req, which, opcode,
1793 obj_req->ex.oe_off, obj_req->ex.oe_len, 0, 0);
1794 rbd_osd_req_setup_data(obj_req, which++);
1796 rbd_assert(which == obj_req->osd_req->r_num_ops);
1797 rbd_osd_req_format_write(obj_req);
1800 static int rbd_obj_setup_write(struct rbd_obj_request *obj_req)
1802 unsigned int num_osd_ops, which = 0;
1805 /* reverse map the entire object onto the parent */
1806 ret = rbd_obj_calc_img_extents(obj_req, true);
1810 if (obj_req->num_img_extents) {
1811 obj_req->write_state = RBD_OBJ_WRITE_GUARD;
1812 num_osd_ops = 3; /* stat + setallochint + write/writefull */
1814 obj_req->write_state = RBD_OBJ_WRITE_FLAT;
1815 num_osd_ops = 2; /* setallochint + write/writefull */
1818 obj_req->osd_req = rbd_osd_req_create(obj_req, num_osd_ops);
1819 if (!obj_req->osd_req)
1822 if (obj_req->num_img_extents) {
1823 ret = __rbd_obj_setup_stat(obj_req, which++);
1828 __rbd_obj_setup_write(obj_req, which);
1832 static void __rbd_obj_setup_discard(struct rbd_obj_request *obj_req,
1837 if (rbd_obj_is_entire(obj_req)) {
1838 if (obj_req->num_img_extents) {
1839 osd_req_op_init(obj_req->osd_req, which++,
1840 CEPH_OSD_OP_CREATE, 0);
1841 opcode = CEPH_OSD_OP_TRUNCATE;
1843 osd_req_op_init(obj_req->osd_req, which++,
1844 CEPH_OSD_OP_DELETE, 0);
1847 } else if (rbd_obj_is_tail(obj_req)) {
1848 opcode = CEPH_OSD_OP_TRUNCATE;
1850 opcode = CEPH_OSD_OP_ZERO;
1854 osd_req_op_extent_init(obj_req->osd_req, which++, opcode,
1855 obj_req->ex.oe_off, obj_req->ex.oe_len,
1858 rbd_assert(which == obj_req->osd_req->r_num_ops);
1859 rbd_osd_req_format_write(obj_req);
1862 static int rbd_obj_setup_discard(struct rbd_obj_request *obj_req)
1864 unsigned int num_osd_ops, which = 0;
1867 /* reverse map the entire object onto the parent */
1868 ret = rbd_obj_calc_img_extents(obj_req, true);
1872 if (rbd_obj_is_entire(obj_req)) {
1873 obj_req->write_state = RBD_OBJ_WRITE_FLAT;
1874 if (obj_req->num_img_extents)
1875 num_osd_ops = 2; /* create + truncate */
1877 num_osd_ops = 1; /* delete */
1879 if (obj_req->num_img_extents) {
1880 obj_req->write_state = RBD_OBJ_WRITE_GUARD;
1881 num_osd_ops = 2; /* stat + truncate/zero */
1883 obj_req->write_state = RBD_OBJ_WRITE_FLAT;
1884 num_osd_ops = 1; /* truncate/zero */
1888 obj_req->osd_req = rbd_osd_req_create(obj_req, num_osd_ops);
1889 if (!obj_req->osd_req)
1892 if (!rbd_obj_is_entire(obj_req) && obj_req->num_img_extents) {
1893 ret = __rbd_obj_setup_stat(obj_req, which++);
1898 __rbd_obj_setup_discard(obj_req, which);
1903 * For each object request in @img_req, allocate an OSD request, add
1904 * individual OSD ops and prepare them for submission. The number of
1905 * OSD ops depends on op_type and the overlap point (if any).
1907 static int __rbd_img_fill_request(struct rbd_img_request *img_req)
1909 struct rbd_obj_request *obj_req;
1912 for_each_obj_request(img_req, obj_req) {
1913 switch (img_req->op_type) {
1915 ret = rbd_obj_setup_read(obj_req);
1918 ret = rbd_obj_setup_write(obj_req);
1920 case OBJ_OP_DISCARD:
1921 ret = rbd_obj_setup_discard(obj_req);
1933 union rbd_img_fill_iter {
1934 struct ceph_bio_iter bio_iter;
1935 struct ceph_bvec_iter bvec_iter;
1938 struct rbd_img_fill_ctx {
1939 enum obj_request_type pos_type;
1940 union rbd_img_fill_iter *pos;
1941 union rbd_img_fill_iter iter;
1942 ceph_object_extent_fn_t set_pos_fn;
1943 ceph_object_extent_fn_t count_fn;
1944 ceph_object_extent_fn_t copy_fn;
1947 static struct ceph_object_extent *alloc_object_extent(void *arg)
1949 struct rbd_img_request *img_req = arg;
1950 struct rbd_obj_request *obj_req;
1952 obj_req = rbd_obj_request_create();
1956 rbd_img_obj_request_add(img_req, obj_req);
1957 return &obj_req->ex;
1961 * While su != os && sc == 1 is technically not fancy (it's the same
1962 * layout as su == os && sc == 1), we can't use the nocopy path for it
1963 * because ->set_pos_fn() should be called only once per object.
1964 * ceph_file_to_extents() invokes action_fn once per stripe unit, so
1965 * treat su != os && sc == 1 as fancy.
1967 static bool rbd_layout_is_fancy(struct ceph_file_layout *l)
1969 return l->stripe_unit != l->object_size;
1972 static int rbd_img_fill_request_nocopy(struct rbd_img_request *img_req,
1973 struct ceph_file_extent *img_extents,
1974 u32 num_img_extents,
1975 struct rbd_img_fill_ctx *fctx)
1980 img_req->data_type = fctx->pos_type;
1983 * Create object requests and set each object request's starting
1984 * position in the provided bio (list) or bio_vec array.
1986 fctx->iter = *fctx->pos;
1987 for (i = 0; i < num_img_extents; i++) {
1988 ret = ceph_file_to_extents(&img_req->rbd_dev->layout,
1989 img_extents[i].fe_off,
1990 img_extents[i].fe_len,
1991 &img_req->object_extents,
1992 alloc_object_extent, img_req,
1993 fctx->set_pos_fn, &fctx->iter);
1998 return __rbd_img_fill_request(img_req);
2002 * Map a list of image extents to a list of object extents, create the
2003 * corresponding object requests (normally each to a different object,
2004 * but not always) and add them to @img_req. For each object request,
2005 * set up its data descriptor to point to the corresponding chunk(s) of
2006 * @fctx->pos data buffer.
2008 * Because ceph_file_to_extents() will merge adjacent object extents
2009 * together, each object request's data descriptor may point to multiple
2010 * different chunks of @fctx->pos data buffer.
2012 * @fctx->pos data buffer is assumed to be large enough.
2014 static int rbd_img_fill_request(struct rbd_img_request *img_req,
2015 struct ceph_file_extent *img_extents,
2016 u32 num_img_extents,
2017 struct rbd_img_fill_ctx *fctx)
2019 struct rbd_device *rbd_dev = img_req->rbd_dev;
2020 struct rbd_obj_request *obj_req;
2024 if (fctx->pos_type == OBJ_REQUEST_NODATA ||
2025 !rbd_layout_is_fancy(&rbd_dev->layout))
2026 return rbd_img_fill_request_nocopy(img_req, img_extents,
2027 num_img_extents, fctx);
2029 img_req->data_type = OBJ_REQUEST_OWN_BVECS;
2032 * Create object requests and determine ->bvec_count for each object
2033 * request. Note that ->bvec_count sum over all object requests may
2034 * be greater than the number of bio_vecs in the provided bio (list)
2035 * or bio_vec array because when mapped, those bio_vecs can straddle
2036 * stripe unit boundaries.
2038 fctx->iter = *fctx->pos;
2039 for (i = 0; i < num_img_extents; i++) {
2040 ret = ceph_file_to_extents(&rbd_dev->layout,
2041 img_extents[i].fe_off,
2042 img_extents[i].fe_len,
2043 &img_req->object_extents,
2044 alloc_object_extent, img_req,
2045 fctx->count_fn, &fctx->iter);
2050 for_each_obj_request(img_req, obj_req) {
2051 obj_req->bvec_pos.bvecs = kmalloc_array(obj_req->bvec_count,
2052 sizeof(*obj_req->bvec_pos.bvecs),
2054 if (!obj_req->bvec_pos.bvecs)
2059 * Fill in each object request's private bio_vec array, splitting and
2060 * rearranging the provided bio_vecs in stripe unit chunks as needed.
2062 fctx->iter = *fctx->pos;
2063 for (i = 0; i < num_img_extents; i++) {
2064 ret = ceph_iterate_extents(&rbd_dev->layout,
2065 img_extents[i].fe_off,
2066 img_extents[i].fe_len,
2067 &img_req->object_extents,
2068 fctx->copy_fn, &fctx->iter);
2073 return __rbd_img_fill_request(img_req);
2076 static int rbd_img_fill_nodata(struct rbd_img_request *img_req,
2079 struct ceph_file_extent ex = { off, len };
2080 union rbd_img_fill_iter dummy;
2081 struct rbd_img_fill_ctx fctx = {
2082 .pos_type = OBJ_REQUEST_NODATA,
2086 return rbd_img_fill_request(img_req, &ex, 1, &fctx);
2089 static void set_bio_pos(struct ceph_object_extent *ex, u32 bytes, void *arg)
2091 struct rbd_obj_request *obj_req =
2092 container_of(ex, struct rbd_obj_request, ex);
2093 struct ceph_bio_iter *it = arg;
2095 dout("%s objno %llu bytes %u\n", __func__, ex->oe_objno, bytes);
2096 obj_req->bio_pos = *it;
2097 ceph_bio_iter_advance(it, bytes);
2100 static void count_bio_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
2102 struct rbd_obj_request *obj_req =
2103 container_of(ex, struct rbd_obj_request, ex);
2104 struct ceph_bio_iter *it = arg;
2106 dout("%s objno %llu bytes %u\n", __func__, ex->oe_objno, bytes);
2107 ceph_bio_iter_advance_step(it, bytes, ({
2108 obj_req->bvec_count++;
2113 static void copy_bio_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
2115 struct rbd_obj_request *obj_req =
2116 container_of(ex, struct rbd_obj_request, ex);
2117 struct ceph_bio_iter *it = arg;
2119 dout("%s objno %llu bytes %u\n", __func__, ex->oe_objno, bytes);
2120 ceph_bio_iter_advance_step(it, bytes, ({
2121 obj_req->bvec_pos.bvecs[obj_req->bvec_idx++] = bv;
2122 obj_req->bvec_pos.iter.bi_size += bv.bv_len;
2126 static int __rbd_img_fill_from_bio(struct rbd_img_request *img_req,
2127 struct ceph_file_extent *img_extents,
2128 u32 num_img_extents,
2129 struct ceph_bio_iter *bio_pos)
2131 struct rbd_img_fill_ctx fctx = {
2132 .pos_type = OBJ_REQUEST_BIO,
2133 .pos = (union rbd_img_fill_iter *)bio_pos,
2134 .set_pos_fn = set_bio_pos,
2135 .count_fn = count_bio_bvecs,
2136 .copy_fn = copy_bio_bvecs,
2139 return rbd_img_fill_request(img_req, img_extents, num_img_extents,
2143 static int rbd_img_fill_from_bio(struct rbd_img_request *img_req,
2144 u64 off, u64 len, struct bio *bio)
2146 struct ceph_file_extent ex = { off, len };
2147 struct ceph_bio_iter it = { .bio = bio, .iter = bio->bi_iter };
2149 return __rbd_img_fill_from_bio(img_req, &ex, 1, &it);
2152 static void set_bvec_pos(struct ceph_object_extent *ex, u32 bytes, void *arg)
2154 struct rbd_obj_request *obj_req =
2155 container_of(ex, struct rbd_obj_request, ex);
2156 struct ceph_bvec_iter *it = arg;
2158 obj_req->bvec_pos = *it;
2159 ceph_bvec_iter_shorten(&obj_req->bvec_pos, bytes);
2160 ceph_bvec_iter_advance(it, bytes);
2163 static void count_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
2165 struct rbd_obj_request *obj_req =
2166 container_of(ex, struct rbd_obj_request, ex);
2167 struct ceph_bvec_iter *it = arg;
2169 ceph_bvec_iter_advance_step(it, bytes, ({
2170 obj_req->bvec_count++;
2174 static void copy_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
2176 struct rbd_obj_request *obj_req =
2177 container_of(ex, struct rbd_obj_request, ex);
2178 struct ceph_bvec_iter *it = arg;
2180 ceph_bvec_iter_advance_step(it, bytes, ({
2181 obj_req->bvec_pos.bvecs[obj_req->bvec_idx++] = bv;
2182 obj_req->bvec_pos.iter.bi_size += bv.bv_len;
2186 static int __rbd_img_fill_from_bvecs(struct rbd_img_request *img_req,
2187 struct ceph_file_extent *img_extents,
2188 u32 num_img_extents,
2189 struct ceph_bvec_iter *bvec_pos)
2191 struct rbd_img_fill_ctx fctx = {
2192 .pos_type = OBJ_REQUEST_BVECS,
2193 .pos = (union rbd_img_fill_iter *)bvec_pos,
2194 .set_pos_fn = set_bvec_pos,
2195 .count_fn = count_bvecs,
2196 .copy_fn = copy_bvecs,
2199 return rbd_img_fill_request(img_req, img_extents, num_img_extents,
2203 static int rbd_img_fill_from_bvecs(struct rbd_img_request *img_req,
2204 struct ceph_file_extent *img_extents,
2205 u32 num_img_extents,
2206 struct bio_vec *bvecs)
2208 struct ceph_bvec_iter it = {
2210 .iter = { .bi_size = ceph_file_extents_bytes(img_extents,
2214 return __rbd_img_fill_from_bvecs(img_req, img_extents, num_img_extents,
2218 static void rbd_img_request_submit(struct rbd_img_request *img_request)
2220 struct rbd_obj_request *obj_request;
2222 dout("%s: img %p\n", __func__, img_request);
2224 rbd_img_request_get(img_request);
2225 for_each_obj_request(img_request, obj_request)
2226 rbd_obj_request_submit(obj_request);
2228 rbd_img_request_put(img_request);
2231 static int rbd_obj_read_from_parent(struct rbd_obj_request *obj_req)
2233 struct rbd_img_request *img_req = obj_req->img_request;
2234 struct rbd_img_request *child_img_req;
2237 child_img_req = rbd_img_request_create(img_req->rbd_dev->parent,
2242 __set_bit(IMG_REQ_CHILD, &child_img_req->flags);
2243 child_img_req->obj_request = obj_req;
2245 if (!rbd_img_is_write(img_req)) {
2246 switch (img_req->data_type) {
2247 case OBJ_REQUEST_BIO:
2248 ret = __rbd_img_fill_from_bio(child_img_req,
2249 obj_req->img_extents,
2250 obj_req->num_img_extents,
2253 case OBJ_REQUEST_BVECS:
2254 case OBJ_REQUEST_OWN_BVECS:
2255 ret = __rbd_img_fill_from_bvecs(child_img_req,
2256 obj_req->img_extents,
2257 obj_req->num_img_extents,
2258 &obj_req->bvec_pos);
2264 ret = rbd_img_fill_from_bvecs(child_img_req,
2265 obj_req->img_extents,
2266 obj_req->num_img_extents,
2267 obj_req->copyup_bvecs);
2270 rbd_img_request_put(child_img_req);
2274 rbd_img_request_submit(child_img_req);
2278 static bool rbd_obj_handle_read(struct rbd_obj_request *obj_req)
2280 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2283 if (obj_req->result == -ENOENT &&
2284 rbd_dev->parent_overlap && !obj_req->tried_parent) {
2285 /* reverse map this object extent onto the parent */
2286 ret = rbd_obj_calc_img_extents(obj_req, false);
2288 obj_req->result = ret;
2292 if (obj_req->num_img_extents) {
2293 obj_req->tried_parent = true;
2294 ret = rbd_obj_read_from_parent(obj_req);
2296 obj_req->result = ret;
2304 * -ENOENT means a hole in the image -- zero-fill the entire
2305 * length of the request. A short read also implies zero-fill
2306 * to the end of the request. In both cases we update xferred
2307 * count to indicate the whole request was satisfied.
2309 if (obj_req->result == -ENOENT ||
2310 (!obj_req->result && obj_req->xferred < obj_req->ex.oe_len)) {
2311 rbd_assert(!obj_req->xferred || !obj_req->result);
2312 rbd_obj_zero_range(obj_req, obj_req->xferred,
2313 obj_req->ex.oe_len - obj_req->xferred);
2314 obj_req->result = 0;
2315 obj_req->xferred = obj_req->ex.oe_len;
2322 * copyup_bvecs pages are never highmem pages
2324 static bool is_zero_bvecs(struct bio_vec *bvecs, u32 bytes)
2326 struct ceph_bvec_iter it = {
2328 .iter = { .bi_size = bytes },
2331 ceph_bvec_iter_advance_step(&it, bytes, ({
2332 if (memchr_inv(page_address(bv.bv_page) + bv.bv_offset, 0,
2339 static int rbd_obj_issue_copyup(struct rbd_obj_request *obj_req, u32 bytes)
2341 unsigned int num_osd_ops = obj_req->osd_req->r_num_ops;
2344 dout("%s obj_req %p bytes %u\n", __func__, obj_req, bytes);
2345 rbd_assert(obj_req->osd_req->r_ops[0].op == CEPH_OSD_OP_STAT);
2346 rbd_osd_req_destroy(obj_req->osd_req);
2349 * Create a copyup request with the same number of OSD ops as
2350 * the original request. The original request was stat + op(s),
2351 * the new copyup request will be copyup + the same op(s).
2353 obj_req->osd_req = rbd_osd_req_create(obj_req, num_osd_ops);
2354 if (!obj_req->osd_req)
2357 ret = osd_req_op_cls_init(obj_req->osd_req, 0, CEPH_OSD_OP_CALL, "rbd",
	 * Only send non-zero copyup data to save some I/O and network
	 * bandwidth -- zero copyup data is equivalent to the object not
	 * existing.
2367 if (is_zero_bvecs(obj_req->copyup_bvecs, bytes)) {
2368 dout("%s obj_req %p detected zeroes\n", __func__, obj_req);
2371 osd_req_op_cls_request_data_bvecs(obj_req->osd_req, 0,
2372 obj_req->copyup_bvecs,
2373 obj_req->copyup_bvec_count,
2376 switch (obj_req->img_request->op_type) {
2377 case OBJ_OP_WRITE:
2378 __rbd_obj_setup_write(obj_req, 1);
2380 case OBJ_OP_DISCARD:
2381 rbd_assert(!rbd_obj_is_entire(obj_req));
2382 __rbd_obj_setup_discard(obj_req, 1);
2388 rbd_obj_request_submit(obj_req);
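/*
 * Illustrative sketch (not driver code): the op-list rewrite performed by
 * rbd_obj_issue_copyup().  The strings are labels only, not OSD op
 * encodings -- the point is that the stat guard becomes a call to the
 * "rbd" class "copyup" method while the data op(s) are reused unchanged.
 */
#if 0	/* example only -- never built */
struct example_op_rewrite {
	const char *guarded;	/* original request: stat + op(s) */
	const char *copyup;	/* resubmitted request: copyup + op(s) */
};

static const struct example_op_rewrite example_copyup_layout[] = {
	{ "stat (guard)",	 "call rbd.copyup (parent data, or none if zero)" },
	{ "write/discard op(s)", "same op(s), unchanged" },
};
#endif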
2392 static int setup_copyup_bvecs(struct rbd_obj_request *obj_req, u64 obj_overlap)
2396 rbd_assert(!obj_req->copyup_bvecs);
2397 obj_req->copyup_bvec_count = calc_pages_for(0, obj_overlap);
2398 obj_req->copyup_bvecs = kcalloc(obj_req->copyup_bvec_count,
2399 sizeof(*obj_req->copyup_bvecs),
2401 if (!obj_req->copyup_bvecs)
2404 for (i = 0; i < obj_req->copyup_bvec_count; i++) {
2405 unsigned int len = min(obj_overlap, (u64)PAGE_SIZE);
2407 obj_req->copyup_bvecs[i].bv_page = alloc_page(GFP_NOIO);
2408 if (!obj_req->copyup_bvecs[i].bv_page)
2411 obj_req->copyup_bvecs[i].bv_offset = 0;
2412 obj_req->copyup_bvecs[i].bv_len = len;
2416 rbd_assert(!obj_overlap);
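/*
 * Illustrative sketch (not driver code): the sizing arithmetic used by
 * setup_copyup_bvecs().  calc_pages_for(0, N) is ceil(N / PAGE_SIZE) and
 * every bio_vec covers a full page except possibly the last; the helpers
 * below are hypothetical restatements of that.
 */
#if 0	/* example only -- never built */
/* Number of single-page bio_vecs needed to cover 'overlap' bytes. */
static unsigned int example_copyup_bvec_count(unsigned long overlap,
					      unsigned long page_size)
{
	return (overlap + page_size - 1) / page_size;
}

/* Length of the next bio_vec when 'remaining' bytes are still uncovered. */
static unsigned long example_copyup_bvec_len(unsigned long remaining,
					     unsigned long page_size)
{
	return remaining < page_size ? remaining : page_size;
}
#endif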
2420 static int rbd_obj_handle_write_guard(struct rbd_obj_request *obj_req)
2422 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2425 rbd_assert(obj_req->num_img_extents);
2426 prune_extents(obj_req->img_extents, &obj_req->num_img_extents,
2427 rbd_dev->parent_overlap);
2428 if (!obj_req->num_img_extents) {
2430 * The overlap has become 0 (most likely because the
2431 * image has been flattened). Use rbd_obj_issue_copyup()
2432 * to re-submit the original write request -- the copyup
2433 * operation itself will be a no-op, since someone must
2434 * have populated the child object while we weren't
2435 * looking. Move to WRITE_FLAT state as we'll be done
2436 * with the operation once the null copyup completes.
2438 obj_req->write_state = RBD_OBJ_WRITE_FLAT;
2439 return rbd_obj_issue_copyup(obj_req, 0);
2442 ret = setup_copyup_bvecs(obj_req, rbd_obj_img_extents_bytes(obj_req));
2446 obj_req->write_state = RBD_OBJ_WRITE_COPYUP;
2447 return rbd_obj_read_from_parent(obj_req);
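/*
 * Illustrative sketch (not driver code): the decision made by
 * rbd_obj_handle_write_guard(), stripped of request plumbing.  Enum and
 * helper are hypothetical; zero remaining overlap means a null copyup,
 * anything else means "read the parent range first, then copyup".
 */
#if 0	/* example only -- never built */
enum example_guard_action {
	EXAMPLE_NULL_COPYUP,			/* overlap pruned to 0 */
	EXAMPLE_READ_PARENT_THEN_COPYUP,	/* fetch parent data first */
};

static enum example_guard_action
example_write_guard(unsigned int num_img_extents)
{
	return num_img_extents ? EXAMPLE_READ_PARENT_THEN_COPYUP
			       : EXAMPLE_NULL_COPYUP;
}
#endif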
2450 static bool rbd_obj_handle_write(struct rbd_obj_request *obj_req)
2455 switch (obj_req->write_state) {
2456 case RBD_OBJ_WRITE_GUARD:
2457 rbd_assert(!obj_req->xferred);
2458 if (obj_req->result == -ENOENT) {
2460 * The target object doesn't exist. Read the data for
2461 * the entire target object up to the overlap point (if
2462 * any) from the parent, so we can use it for a copyup.
2464 ret = rbd_obj_handle_write_guard(obj_req);
2466 obj_req->result = ret;
2472 case RBD_OBJ_WRITE_FLAT:
2473 if (!obj_req->result)
2475 * There is no such thing as a successful short
2476 * write -- indicate the whole request was satisfied.
2478 obj_req->xferred = obj_req->ex.oe_len;
2480 case RBD_OBJ_WRITE_COPYUP:
2481 obj_req->write_state = RBD_OBJ_WRITE_GUARD;
2482 if (obj_req->result)
2485 rbd_assert(obj_req->xferred);
2486 ret = rbd_obj_issue_copyup(obj_req, obj_req->xferred);
2488 obj_req->result = ret;
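/*
 * Illustrative sketch (not driver code): the per-object write state machine
 * driven by rbd_obj_handle_write().  The state names are the driver's, the
 * event/action strings are descriptive only.
 */
#if 0	/* example only -- never built */
struct example_write_transition {
	const char *state;
	const char *event;
	const char *action;
};

static const struct example_write_transition example_write_fsm[] = {
	{ "RBD_OBJ_WRITE_GUARD",  "-ENOENT",	   "read parent overlap, -> COPYUP" },
	{ "RBD_OBJ_WRITE_GUARD",  "other result",  "complete" },
	{ "RBD_OBJ_WRITE_COPYUP", "parent read ok", "issue copyup, -> GUARD" },
	{ "RBD_OBJ_WRITE_FLAT",   "success",	   "complete, xferred = oe_len" },
};
#endif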
2498 * Returns true if @obj_req is completed, or false otherwise.
2500 static bool __rbd_obj_handle_request(struct rbd_obj_request *obj_req)
2502 switch (obj_req->img_request->op_type) {
2503 case OBJ_OP_READ:
2504 return rbd_obj_handle_read(obj_req);
2505 case OBJ_OP_WRITE:
2506 return rbd_obj_handle_write(obj_req);
2507 case OBJ_OP_DISCARD:
2508 if (rbd_obj_handle_write(obj_req)) {
2510 * Hide -ENOENT from delete/truncate/zero -- discarding
2511 * a non-existent object is not a problem.
2513 if (obj_req->result == -ENOENT) {
2514 obj_req->result = 0;
2515 obj_req->xferred = obj_req->ex.oe_len;
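/*
 * Illustrative sketch (not driver code): the result filtering applied to
 * discards above, as a hypothetical helper -- a missing object counts as a
 * fully satisfied discard.
 */
#if 0	/* example only -- never built */
#include <errno.h>

static int example_filter_discard_result(int result, unsigned long *xferred,
					 unsigned long len)
{
	if (result == -ENOENT) {
		*xferred = len;		/* report the whole range as discarded */
		return 0;
	}
	return result;
}
#endif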
2525 static void rbd_obj_end_request(struct rbd_obj_request *obj_req)
2527 struct rbd_img_request *img_req = obj_req->img_request;
2529 rbd_assert((!obj_req->result &&
2530 obj_req->xferred == obj_req->ex.oe_len) ||
2531 (obj_req->result < 0 && !obj_req->xferred));
2532 if (!obj_req->result) {
2533 img_req->xferred += obj_req->xferred;
2537 rbd_warn(img_req->rbd_dev,
2538 "%s at objno %llu %llu~%llu result %d xferred %llu",
2539 obj_op_name(img_req->op_type), obj_req->ex.oe_objno,
2540 obj_req->ex.oe_off, obj_req->ex.oe_len, obj_req->result,
2542 if (!img_req->result) {
2543 img_req->result = obj_req->result;
2544 img_req->xferred = 0;
2548 static void rbd_img_end_child_request(struct rbd_img_request *img_req)
2550 struct rbd_obj_request *obj_req = img_req->obj_request;
2552 rbd_assert(test_bit(IMG_REQ_CHILD, &img_req->flags));
2553 rbd_assert((!img_req->result &&
2554 img_req->xferred == rbd_obj_img_extents_bytes(obj_req)) ||
2555 (img_req->result < 0 && !img_req->xferred));
2557 obj_req->result = img_req->result;
2558 obj_req->xferred = img_req->xferred;
2559 rbd_img_request_put(img_req);
2562 static void rbd_img_end_request(struct rbd_img_request *img_req)
2564 rbd_assert(!test_bit(IMG_REQ_CHILD, &img_req->flags));
2565 rbd_assert((!img_req->result &&
2566 img_req->xferred == blk_rq_bytes(img_req->rq)) ||
2567 (img_req->result < 0 && !img_req->xferred));
2569 blk_mq_end_request(img_req->rq,
2570 errno_to_blk_status(img_req->result));
2571 rbd_img_request_put(img_req);
2574 static void rbd_obj_handle_request(struct rbd_obj_request *obj_req)
2576 struct rbd_img_request *img_req;
2579 if (!__rbd_obj_handle_request(obj_req))
2582 img_req = obj_req->img_request;
2583 spin_lock(&img_req->completion_lock);
2584 rbd_obj_end_request(obj_req);
2585 rbd_assert(img_req->pending_count);
2586 if (--img_req->pending_count) {
2587 spin_unlock(&img_req->completion_lock);
2591 spin_unlock(&img_req->completion_lock);
2592 if (test_bit(IMG_REQ_CHILD, &img_req->flags)) {
2593 obj_req = img_req->obj_request;
2594 rbd_img_end_child_request(img_req);
2597 rbd_img_end_request(img_req);
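/*
 * Illustrative sketch (not driver code): the completion accounting in
 * rbd_obj_handle_request().  Every object request drops pending_count under
 * completion_lock; the one that reaches zero finishes the image request
 * (child image requests hand their result back to the originating object
 * request instead of ending a block request).  Hypothetical helper:
 */
#if 0	/* example only -- never built */
/* Caller holds the image request's completion_lock. */
static bool example_is_last_completion(unsigned int *pending_count)
{
	return --(*pending_count) == 0;
}
#endif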
2600 static const struct rbd_client_id rbd_empty_cid;
2602 static bool rbd_cid_equal(const struct rbd_client_id *lhs,
2603 const struct rbd_client_id *rhs)
2605 return lhs->gid == rhs->gid && lhs->handle == rhs->handle;
2608 static struct rbd_client_id rbd_get_cid(struct rbd_device *rbd_dev)
2610 struct rbd_client_id cid;
2612 mutex_lock(&rbd_dev->watch_mutex);
2613 cid.gid = ceph_client_gid(rbd_dev->rbd_client->client);
2614 cid.handle = rbd_dev->watch_cookie;
2615 mutex_unlock(&rbd_dev->watch_mutex);
2620 * lock_rwsem must be held for write
2622 static void rbd_set_owner_cid(struct rbd_device *rbd_dev,
2623 const struct rbd_client_id *cid)
2625 dout("%s rbd_dev %p %llu-%llu -> %llu-%llu\n", __func__, rbd_dev,
2626 rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle,
2627 cid->gid, cid->handle);
2628 rbd_dev->owner_cid = *cid; /* struct */
2631 static void format_lock_cookie(struct rbd_device *rbd_dev, char *buf)
2633 mutex_lock(&rbd_dev->watch_mutex);
2634 sprintf(buf, "%s %llu", RBD_LOCK_COOKIE_PREFIX, rbd_dev->watch_cookie);
2635 mutex_unlock(&rbd_dev->watch_mutex);
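/*
 * Illustrative sketch (not driver code): the lock cookie is simply
 * RBD_LOCK_COOKIE_PREFIX followed by the current watch cookie, and
 * find_watcher() parses it back with the matching sscanf() format.
 * Hypothetical userspace round-trip (buffer size chosen arbitrarily):
 */
#if 0	/* example only -- never built */
#include <stdio.h>

static int example_cookie_roundtrip(unsigned long long watch_cookie)
{
	char buf[64];
	unsigned long long parsed = 0;

	snprintf(buf, sizeof(buf), "%s %llu", RBD_LOCK_COOKIE_PREFIX,
		 watch_cookie);
	if (sscanf(buf, RBD_LOCK_COOKIE_PREFIX " %llu", &parsed) != 1)
		return -1;
	return parsed == watch_cookie ? 0 : -1;
}
#endif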
2638 static void __rbd_lock(struct rbd_device *rbd_dev, const char *cookie)
2640 struct rbd_client_id cid = rbd_get_cid(rbd_dev);
2642 strcpy(rbd_dev->lock_cookie, cookie);
2643 rbd_set_owner_cid(rbd_dev, &cid);
2644 queue_work(rbd_dev->task_wq, &rbd_dev->acquired_lock_work);
2648 * lock_rwsem must be held for write
2650 static int rbd_lock(struct rbd_device *rbd_dev)
2652 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2656 WARN_ON(__rbd_is_lock_owner(rbd_dev) ||
2657 rbd_dev->lock_cookie[0] != '\0');
2659 format_lock_cookie(rbd_dev, cookie);
2660 ret = ceph_cls_lock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
2661 RBD_LOCK_NAME, CEPH_CLS_LOCK_EXCLUSIVE, cookie,
2662 RBD_LOCK_TAG, "", 0);
2666 rbd_dev->lock_state = RBD_LOCK_STATE_LOCKED;
2667 __rbd_lock(rbd_dev, cookie);
2672 * lock_rwsem must be held for write
2674 static void rbd_unlock(struct rbd_device *rbd_dev)
2676 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2679 WARN_ON(!__rbd_is_lock_owner(rbd_dev) ||
2680 rbd_dev->lock_cookie[0] == '\0');
2682 ret = ceph_cls_unlock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
2683 RBD_LOCK_NAME, rbd_dev->lock_cookie);
2684 if (ret && ret != -ENOENT)
2685 rbd_warn(rbd_dev, "failed to unlock: %d", ret);
2687 /* treat errors as the image is unlocked */
2688 rbd_dev->lock_state = RBD_LOCK_STATE_UNLOCKED;
2689 rbd_dev->lock_cookie[0] = '\0';
2690 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
2691 queue_work(rbd_dev->task_wq, &rbd_dev->released_lock_work);
2694 static int __rbd_notify_op_lock(struct rbd_device *rbd_dev,
2695 enum rbd_notify_op notify_op,
2696 struct page ***preply_pages,
2699 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2700 struct rbd_client_id cid = rbd_get_cid(rbd_dev);
2701 char buf[4 + 8 + 8 + CEPH_ENCODING_START_BLK_LEN];
2702 int buf_size = sizeof(buf);
2705 dout("%s rbd_dev %p notify_op %d\n", __func__, rbd_dev, notify_op);
2707 /* encode *LockPayload NotifyMessage (op + ClientId) */
2708 ceph_start_encoding(&p, 2, 1, buf_size - CEPH_ENCODING_START_BLK_LEN);
2709 ceph_encode_32(&p, notify_op);
2710 ceph_encode_64(&p, cid.gid);
2711 ceph_encode_64(&p, cid.handle);
2713 return ceph_osdc_notify(osdc, &rbd_dev->header_oid,
2714 &rbd_dev->header_oloc, buf, buf_size,
2715 RBD_NOTIFY_TIMEOUT, preply_pages, preply_len);
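/*
 * Illustrative sketch (not driver code): wire layout produced by the
 * encoding above, which is why buf is sized
 * 4 + 8 + 8 + CEPH_ENCODING_START_BLK_LEN.  The packed struct is a
 * hypothetical view for reading, not how the driver encodes.
 */
#if 0	/* example only -- never built */
/* NotifyMessage payload, little-endian on the wire. */
struct example_notify_message {
	u8	struct_v;	/* 2 */
	u8	struct_compat;	/* 1 */
	__le32	struct_len;	/* bytes that follow: 4 + 8 + 8 */
	__le32	notify_op;	/* enum rbd_notify_op */
	__le64	gid;		/* ClientId.gid */
	__le64	handle;		/* ClientId.handle (watch cookie) */
} __packed;
#endif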
2718 static void rbd_notify_op_lock(struct rbd_device *rbd_dev,
2719 enum rbd_notify_op notify_op)
2721 struct page **reply_pages;
2724 __rbd_notify_op_lock(rbd_dev, notify_op, &reply_pages, &reply_len);
2725 ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len));
2728 static void rbd_notify_acquired_lock(struct work_struct *work)
2730 struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
2731 acquired_lock_work);
2733 rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_ACQUIRED_LOCK);
2736 static void rbd_notify_released_lock(struct work_struct *work)
2738 struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
2739 released_lock_work);
2741 rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_RELEASED_LOCK);
2744 static int rbd_request_lock(struct rbd_device *rbd_dev)
2746 struct page **reply_pages;
2748 bool lock_owner_responded = false;
2751 dout("%s rbd_dev %p\n", __func__, rbd_dev);
2753 ret = __rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_REQUEST_LOCK,
2754 &reply_pages, &reply_len);
2755 if (ret && ret != -ETIMEDOUT) {
2756 rbd_warn(rbd_dev, "failed to request lock: %d", ret);
2760 if (reply_len > 0 && reply_len <= PAGE_SIZE) {
2761 void *p = page_address(reply_pages[0]);
2762 void *const end = p + reply_len;
2765 ceph_decode_32_safe(&p, end, n, e_inval); /* num_acks */
2770 ceph_decode_need(&p, end, 8 + 8, e_inval);
2771 p += 8 + 8; /* skip gid and cookie */
2773 ceph_decode_32_safe(&p, end, len, e_inval);
2777 if (lock_owner_responded) {
2779 "duplicate lock owners detected");
2784 lock_owner_responded = true;
2785 ret = ceph_start_decoding(&p, end, 1, "ResponseMessage",
2789 "failed to decode ResponseMessage: %d",
2794 ret = ceph_decode_32(&p);
2798 if (!lock_owner_responded) {
2799 rbd_warn(rbd_dev, "no lock owners detected");
2804 ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len));
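/*
 * Illustrative sketch (not driver code): shape of the notify reply walked by
 * rbd_request_lock().  Each ack carries the responder's gid and cookie
 * (skipped above) and a payload length; a lock owner's payload is an encoded
 * ResponseMessage whose le32 result is 0 if the owner will release the lock
 * and -EROFS if it refuses.  Hypothetical reading aid, not a decoder:
 */
#if 0	/* example only -- never built */
/* One acknowledgement inside the notify reply (little-endian). */
struct example_notify_ack {
	__le64	gid;		/* responder's global id */
	__le64	cookie;		/* responder's watch cookie */
	__le32	payload_len;	/* 0, or length of an encoded ResponseMessage */
	/* payload: start block (v, compat, len) followed by __le32 result */
} __packed;
#endif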
2812 static void wake_requests(struct rbd_device *rbd_dev, bool wake_all)
2814 dout("%s rbd_dev %p wake_all %d\n", __func__, rbd_dev, wake_all);
2816 cancel_delayed_work(&rbd_dev->lock_dwork);
2818 wake_up_all(&rbd_dev->lock_waitq);
2820 wake_up(&rbd_dev->lock_waitq);
2823 static int get_lock_owner_info(struct rbd_device *rbd_dev,
2824 struct ceph_locker **lockers, u32 *num_lockers)
2826 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2831 dout("%s rbd_dev %p\n", __func__, rbd_dev);
2833 ret = ceph_cls_lock_info(osdc, &rbd_dev->header_oid,
2834 &rbd_dev->header_oloc, RBD_LOCK_NAME,
2835 &lock_type, &lock_tag, lockers, num_lockers);
2839 if (*num_lockers == 0) {
2840 dout("%s rbd_dev %p no lockers detected\n", __func__, rbd_dev);
2844 if (strcmp(lock_tag, RBD_LOCK_TAG)) {
2845 rbd_warn(rbd_dev, "locked by external mechanism, tag %s",
2851 if (lock_type == CEPH_CLS_LOCK_SHARED) {
2852 rbd_warn(rbd_dev, "shared lock type detected");
2857 if (strncmp((*lockers)[0].id.cookie, RBD_LOCK_COOKIE_PREFIX,
2858 strlen(RBD_LOCK_COOKIE_PREFIX))) {
2859 rbd_warn(rbd_dev, "locked by external mechanism, cookie %s",
2860 (*lockers)[0].id.cookie);
2870 static int find_watcher(struct rbd_device *rbd_dev,
2871 const struct ceph_locker *locker)
2873 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2874 struct ceph_watch_item *watchers;
2880 ret = ceph_osdc_list_watchers(osdc, &rbd_dev->header_oid,
2881 &rbd_dev->header_oloc, &watchers,
2886 sscanf(locker->id.cookie, RBD_LOCK_COOKIE_PREFIX " %llu", &cookie);
2887 for (i = 0; i < num_watchers; i++) {
2888 if (!memcmp(&watchers[i].addr, &locker->info.addr,
2889 sizeof(locker->info.addr)) &&
2890 watchers[i].cookie == cookie) {
2891 struct rbd_client_id cid = {
2892 .gid = le64_to_cpu(watchers[i].name.num),
2896 dout("%s rbd_dev %p found cid %llu-%llu\n", __func__,
2897 rbd_dev, cid.gid, cid.handle);
2898 rbd_set_owner_cid(rbd_dev, &cid);
2904 dout("%s rbd_dev %p no watchers\n", __func__, rbd_dev);
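/*
 * Illustrative sketch (not driver code): the liveness test used by
 * find_watcher(), as a stand-alone predicate with a hypothetical name.  A
 * locker is considered alive iff some registered watcher has the same
 * entity address and a watch cookie equal to the numeric part of the lock
 * cookie.
 */
#if 0	/* example only -- never built */
static bool example_watcher_matches(const struct ceph_watch_item *watcher,
				    const struct ceph_locker *locker,
				    u64 locker_cookie)
{
	return !memcmp(&watcher->addr, &locker->info.addr,
		       sizeof(locker->info.addr)) &&
	       watcher->cookie == locker_cookie;
}
#endif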
2912 * lock_rwsem must be held for write
2914 static int rbd_try_lock(struct rbd_device *rbd_dev)
2916 struct ceph_client *client = rbd_dev->rbd_client->client;
2917 struct ceph_locker *lockers;
2922 ret = rbd_lock(rbd_dev);
2926 /* determine if the current lock holder is still alive */
2927 ret = get_lock_owner_info(rbd_dev, &lockers, &num_lockers);
2931 if (num_lockers == 0)
2934 ret = find_watcher(rbd_dev, lockers);
2937 ret = 0; /* have to request lock */
2941 rbd_warn(rbd_dev, "%s%llu seems dead, breaking lock",
2942 ENTITY_NAME(lockers[0].id.name));
2944 ret = ceph_monc_blacklist_add(&client->monc,
2945 &lockers[0].info.addr);
2947 rbd_warn(rbd_dev, "blacklist of %s%llu failed: %d",
2948 ENTITY_NAME(lockers[0].id.name), ret);
2952 ret = ceph_cls_break_lock(&client->osdc, &rbd_dev->header_oid,
2953 &rbd_dev->header_oloc, RBD_LOCK_NAME,
2954 lockers[0].id.cookie,
2955 &lockers[0].id.name);
2956 if (ret && ret != -ENOENT)
2960 ceph_free_lockers(lockers, num_lockers);
2964 ceph_free_lockers(lockers, num_lockers);
2969 * ret is set only if lock_state is RBD_LOCK_STATE_UNLOCKED
2971 static enum rbd_lock_state rbd_try_acquire_lock(struct rbd_device *rbd_dev,
2974 enum rbd_lock_state lock_state;
2976 down_read(&rbd_dev->lock_rwsem);
2977 dout("%s rbd_dev %p read lock_state %d\n", __func__, rbd_dev,
2978 rbd_dev->lock_state);
2979 if (__rbd_is_lock_owner(rbd_dev)) {
2980 lock_state = rbd_dev->lock_state;
2981 up_read(&rbd_dev->lock_rwsem);
2985 up_read(&rbd_dev->lock_rwsem);
2986 down_write(&rbd_dev->lock_rwsem);
2987 dout("%s rbd_dev %p write lock_state %d\n", __func__, rbd_dev,
2988 rbd_dev->lock_state);
2989 if (!__rbd_is_lock_owner(rbd_dev)) {
2990 *pret = rbd_try_lock(rbd_dev);
2992 rbd_warn(rbd_dev, "failed to acquire lock: %d", *pret);
2995 lock_state = rbd_dev->lock_state;
2996 up_write(&rbd_dev->lock_rwsem);
3000 static void rbd_acquire_lock(struct work_struct *work)
3002 struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
3003 struct rbd_device, lock_dwork);
3004 enum rbd_lock_state lock_state;
3007 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3009 lock_state = rbd_try_acquire_lock(rbd_dev, &ret);
3010 if (lock_state != RBD_LOCK_STATE_UNLOCKED || ret == -EBLACKLISTED) {
3011 if (lock_state == RBD_LOCK_STATE_LOCKED)
3012 wake_requests(rbd_dev, true);
3013 dout("%s rbd_dev %p lock_state %d ret %d - done\n", __func__,
3014 rbd_dev, lock_state, ret);
3018 ret = rbd_request_lock(rbd_dev);
3019 if (ret == -ETIMEDOUT) {
3020 goto again; /* treat this as a dead client */
3021 } else if (ret == -EROFS) {
3022 rbd_warn(rbd_dev, "peer will not release lock");
3024 * If this is rbd_add_acquire_lock(), we want to fail
3025 * immediately -- reuse BLACKLISTED flag. Otherwise we want to block.
3028 if (!(rbd_dev->disk->flags & GENHD_FL_UP)) {
3029 set_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags);
3030 /* wake "rbd map --exclusive" process */
3031 wake_requests(rbd_dev, false);
3033 } else if (ret < 0) {
3034 rbd_warn(rbd_dev, "error requesting lock: %d", ret);
3035 mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork,
3039 * lock owner acked, but resend if we don't see them release the lock
3042 dout("%s rbd_dev %p requeueing lock_dwork\n", __func__,
3044 mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork,
3045 msecs_to_jiffies(2 * RBD_NOTIFY_TIMEOUT * MSEC_PER_SEC));
3050 * lock_rwsem must be held for write
3052 static bool rbd_release_lock(struct rbd_device *rbd_dev)
3054 dout("%s rbd_dev %p read lock_state %d\n", __func__, rbd_dev,
3055 rbd_dev->lock_state);
3056 if (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED)
3059 rbd_dev->lock_state = RBD_LOCK_STATE_RELEASING;
3060 downgrade_write(&rbd_dev->lock_rwsem);
3062 * Ensure that all in-flight IO is flushed.
3064 * FIXME: ceph_osdc_sync() flushes the entire OSD client, which
3065 * may be shared with other devices.
3067 ceph_osdc_sync(&rbd_dev->rbd_client->client->osdc);
3068 up_read(&rbd_dev->lock_rwsem);
3070 down_write(&rbd_dev->lock_rwsem);
3071 dout("%s rbd_dev %p write lock_state %d\n", __func__, rbd_dev,
3072 rbd_dev->lock_state);
3073 if (rbd_dev->lock_state != RBD_LOCK_STATE_RELEASING)
3076 rbd_unlock(rbd_dev);
3078 * Give others a chance to grab the lock - we would re-acquire
3079 * almost immediately if we got new IO during ceph_osdc_sync()
3080 * otherwise. We need to ack our own notifications, so this
3081 * lock_dwork will be requeued from rbd_wait_state_locked()
3082 * after wake_requests() in rbd_handle_released_lock().
3084 cancel_delayed_work(&rbd_dev->lock_dwork);
3088 static void rbd_release_lock_work(struct work_struct *work)
3090 struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
3093 down_write(&rbd_dev->lock_rwsem);
3094 rbd_release_lock(rbd_dev);
3095 up_write(&rbd_dev->lock_rwsem);
3098 static void rbd_handle_acquired_lock(struct rbd_device *rbd_dev, u8 struct_v,
3101 struct rbd_client_id cid = { 0 };
3103 if (struct_v >= 2) {
3104 cid.gid = ceph_decode_64(p);
3105 cid.handle = ceph_decode_64(p);
3108 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
3110 if (!rbd_cid_equal(&cid, &rbd_empty_cid)) {
3111 down_write(&rbd_dev->lock_rwsem);
3112 if (rbd_cid_equal(&cid, &rbd_dev->owner_cid)) {
3114 * we already know that the remote client is the owner
3117 up_write(&rbd_dev->lock_rwsem);
3121 rbd_set_owner_cid(rbd_dev, &cid);
3122 downgrade_write(&rbd_dev->lock_rwsem);
3124 down_read(&rbd_dev->lock_rwsem);
3127 if (!__rbd_is_lock_owner(rbd_dev))
3128 wake_requests(rbd_dev, false);
3129 up_read(&rbd_dev->lock_rwsem);
3132 static void rbd_handle_released_lock(struct rbd_device *rbd_dev, u8 struct_v,
3135 struct rbd_client_id cid = { 0 };
3137 if (struct_v >= 2) {
3138 cid.gid = ceph_decode_64(p);
3139 cid.handle = ceph_decode_64(p);
3142 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
3144 if (!rbd_cid_equal(&cid, &rbd_empty_cid)) {
3145 down_write(&rbd_dev->lock_rwsem);
3146 if (!rbd_cid_equal(&cid, &rbd_dev->owner_cid)) {
3147 dout("%s rbd_dev %p unexpected owner, cid %llu-%llu != owner_cid %llu-%llu\n",
3148 __func__, rbd_dev, cid.gid, cid.handle,
3149 rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle);
3150 up_write(&rbd_dev->lock_rwsem);
3154 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
3155 downgrade_write(&rbd_dev->lock_rwsem);
3157 down_read(&rbd_dev->lock_rwsem);
3160 if (!__rbd_is_lock_owner(rbd_dev))
3161 wake_requests(rbd_dev, false);
3162 up_read(&rbd_dev->lock_rwsem);
3166 * Returns result for ResponseMessage to be encoded (<= 0), or 1 if no
3167 * ResponseMessage is needed.
3169 static int rbd_handle_request_lock(struct rbd_device *rbd_dev, u8 struct_v,
3172 struct rbd_client_id my_cid = rbd_get_cid(rbd_dev);
3173 struct rbd_client_id cid = { 0 };
3176 if (struct_v >= 2) {
3177 cid.gid = ceph_decode_64(p);
3178 cid.handle = ceph_decode_64(p);
3181 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
3183 if (rbd_cid_equal(&cid, &my_cid))
3186 down_read(&rbd_dev->lock_rwsem);
3187 if (__rbd_is_lock_owner(rbd_dev)) {
3188 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED &&
3189 rbd_cid_equal(&rbd_dev->owner_cid, &rbd_empty_cid))
3193 * encode ResponseMessage(0) so the peer can detect a missing owner
3198 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED) {
3199 if (!rbd_dev->opts->exclusive) {
3200 dout("%s rbd_dev %p queueing unlock_work\n",
3202 queue_work(rbd_dev->task_wq,
3203 &rbd_dev->unlock_work);
3205 /* refuse to release the lock */
3212 up_read(&rbd_dev->lock_rwsem);
3216 static void __rbd_acknowledge_notify(struct rbd_device *rbd_dev,
3217 u64 notify_id, u64 cookie, s32 *result)
3219 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3220 char buf[4 + CEPH_ENCODING_START_BLK_LEN];
3221 int buf_size = sizeof(buf);
3227 /* encode ResponseMessage */
3228 ceph_start_encoding(&p, 1, 1,
3229 buf_size - CEPH_ENCODING_START_BLK_LEN);
3230 ceph_encode_32(&p, *result);
3235 ret = ceph_osdc_notify_ack(osdc, &rbd_dev->header_oid,
3236 &rbd_dev->header_oloc, notify_id, cookie,
3239 rbd_warn(rbd_dev, "acknowledge_notify failed: %d", ret);
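/*
 * Illustrative sketch (not driver code): the acknowledgement payload built
 * by __rbd_acknowledge_notify() when a result is supplied -- a single le32
 * wrapped in a ResponseMessage start block, hence the
 * 4 + CEPH_ENCODING_START_BLK_LEN buffer.  Packed view is hypothetical.
 */
#if 0	/* example only -- never built */
struct example_response_message {
	u8	struct_v;	/* 1 */
	u8	struct_compat;	/* 1 */
	__le32	struct_len;	/* 4 */
	__le32	result;		/* 0, or a negative errno such as -EROFS */
} __packed;
#endif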
3242 static void rbd_acknowledge_notify(struct rbd_device *rbd_dev, u64 notify_id,
3245 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3246 __rbd_acknowledge_notify(rbd_dev, notify_id, cookie, NULL);
3249 static void rbd_acknowledge_notify_result(struct rbd_device *rbd_dev,
3250 u64 notify_id, u64 cookie, s32 result)
3252 dout("%s rbd_dev %p result %d\n", __func__, rbd_dev, result);
3253 __rbd_acknowledge_notify(rbd_dev, notify_id, cookie, &result);
3256 static void rbd_watch_cb(void *arg, u64 notify_id, u64 cookie,
3257 u64 notifier_id, void *data, size_t data_len)
3259 struct rbd_device *rbd_dev = arg;
3261 void *const end = p + data_len;
3267 dout("%s rbd_dev %p cookie %llu notify_id %llu data_len %zu\n",
3268 __func__, rbd_dev, cookie, notify_id, data_len);
3270 ret = ceph_start_decoding(&p, end, 1, "NotifyMessage",
3273 rbd_warn(rbd_dev, "failed to decode NotifyMessage: %d",
3278 notify_op = ceph_decode_32(&p);
3280 /* legacy notification for header updates */
3281 notify_op = RBD_NOTIFY_OP_HEADER_UPDATE;
3285 dout("%s rbd_dev %p notify_op %u\n", __func__, rbd_dev, notify_op);
3286 switch (notify_op) {
3287 case RBD_NOTIFY_OP_ACQUIRED_LOCK:
3288 rbd_handle_acquired_lock(rbd_dev, struct_v, &p);
3289 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
3291 case RBD_NOTIFY_OP_RELEASED_LOCK:
3292 rbd_handle_released_lock(rbd_dev, struct_v, &p);
3293 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
3295 case RBD_NOTIFY_OP_REQUEST_LOCK:
3296 ret = rbd_handle_request_lock(rbd_dev, struct_v, &p);
3298 rbd_acknowledge_notify_result(rbd_dev, notify_id,
3301 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
3303 case RBD_NOTIFY_OP_HEADER_UPDATE:
3304 ret = rbd_dev_refresh(rbd_dev);
3306 rbd_warn(rbd_dev, "refresh failed: %d", ret);
3308 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
3311 if (rbd_is_lock_owner(rbd_dev))
3312 rbd_acknowledge_notify_result(rbd_dev, notify_id,
3313 cookie, -EOPNOTSUPP);
3315 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);