/*
 * rbd.c -- Export ceph rados objects as a Linux block device
 *
 * based on drivers/block/osdblk.c:
 * Copyright 2009 Red Hat, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * For usage instructions, please refer to:
 *
 *                 Documentation/ABI/testing/sysfs-bus-rbd
 */
31 #include <linux/ceph/libceph.h>
32 #include <linux/ceph/osd_client.h>
33 #include <linux/ceph/mon_client.h>
34 #include <linux/ceph/cls_lock_client.h>
35 #include <linux/ceph/striper.h>
36 #include <linux/ceph/decode.h>
37 #include <linux/parser.h>
38 #include <linux/bsearch.h>
40 #include <linux/kernel.h>
41 #include <linux/device.h>
42 #include <linux/module.h>
43 #include <linux/blk-mq.h>
45 #include <linux/blkdev.h>
46 #include <linux/slab.h>
47 #include <linux/idr.h>
48 #include <linux/workqueue.h>
50 #include "rbd_types.h"
52 #define RBD_DEBUG /* Activate rbd_assert() calls */
/*
 * Increment the given counter and return its updated value.
 * If the counter is already 0 it will not be incremented.
 * If the counter is already at its maximum value returns
 * -EINVAL without updating it.
 */
60 static int atomic_inc_return_safe(atomic_t *v)
64 counter = (unsigned int)__atomic_add_unless(v, 1, 0);
65 if (counter <= (unsigned int)INT_MAX)
73 /* Decrement the counter. Return the resulting value, or -EINVAL */
74 static int atomic_dec_return_safe(atomic_t *v)
78 counter = atomic_dec_return(v);
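/*
 * A standalone userspace sketch (not driver code) of the saturation
 * behaviour documented above, using a plain int in place of atomic_t;
 * the helper names below are illustrative assumptions.
 */
#if 0 /* standalone userspace sketch -- never built with the driver */
#include <limits.h>
#include <stdio.h>

#define SAFE_EINVAL (-22)	/* stand-in for -EINVAL */

/* Increment and return the updated value; stay put at 0, refuse at INT_MAX. */
static int inc_return_safe(int *v)
{
	if (*v == 0)
		return 0;
	if (*v == INT_MAX)
		return SAFE_EINVAL;
	return ++*v;
}

/* Decrement and return the updated value; refuse to go below 0. */
static int dec_return_safe(int *v)
{
	if (*v == 0)
		return SAFE_EINVAL;
	return --*v;
}

int main(void)
{
	int refs = 1;

	printf("%d\n", inc_return_safe(&refs));	/* 2 */
	printf("%d\n", dec_return_safe(&refs));	/* 1 */
	printf("%d\n", dec_return_safe(&refs));	/* 0 */
	printf("%d\n", dec_return_safe(&refs));	/* -22: underflow refused */
	return 0;
}
#endif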
87 #define RBD_DRV_NAME "rbd"
89 #define RBD_MINORS_PER_MAJOR 256
90 #define RBD_SINGLE_MAJOR_PART_SHIFT 4
92 #define RBD_MAX_PARENT_CHAIN_LEN 16
94 #define RBD_SNAP_DEV_NAME_PREFIX "snap_"
95 #define RBD_MAX_SNAP_NAME_LEN \
96 (NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))
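/*
 * Quick standalone illustration of the limit computed above:
 * sizeof(RBD_SNAP_DEV_NAME_PREFIX) - 1 is the prefix length (5), so
 * with NAME_MAX at 255 a snapshot name may be at most 250 bytes.
 */
#if 0 /* standalone userspace sketch -- never built with the driver */
#include <limits.h>	/* NAME_MAX */
#include <stdio.h>

#define RBD_SNAP_DEV_NAME_PREFIX "snap_"
#define RBD_MAX_SNAP_NAME_LEN \
	(NAME_MAX - (sizeof(RBD_SNAP_DEV_NAME_PREFIX) - 1))

int main(void)
{
	/* sizeof counts the trailing NUL, hence the "- 1" */
	printf("prefix length: %zu\n", sizeof(RBD_SNAP_DEV_NAME_PREFIX) - 1);
	printf("max snap name length: %zu\n", (size_t)RBD_MAX_SNAP_NAME_LEN);
	return 0;
}
#endif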
98 #define RBD_MAX_SNAP_COUNT 510 /* allows max snapc to fit in 4KB */
100 #define RBD_SNAP_HEAD_NAME "-"
102 #define BAD_SNAP_INDEX U32_MAX /* invalid index into snap array */
104 /* This allows a single page to hold an image name sent by OSD */
105 #define RBD_IMAGE_NAME_LEN_MAX (PAGE_SIZE - sizeof (__le32) - 1)
106 #define RBD_IMAGE_ID_LEN_MAX 64
108 #define RBD_OBJ_PREFIX_LEN_MAX 64
110 #define RBD_NOTIFY_TIMEOUT 5 /* seconds */
111 #define RBD_RETRY_DELAY msecs_to_jiffies(1000)
115 #define RBD_FEATURE_LAYERING (1ULL<<0)
116 #define RBD_FEATURE_STRIPINGV2 (1ULL<<1)
117 #define RBD_FEATURE_EXCLUSIVE_LOCK (1ULL<<2)
118 #define RBD_FEATURE_DATA_POOL (1ULL<<7)
119 #define RBD_FEATURE_OPERATIONS (1ULL<<8)
121 #define RBD_FEATURES_ALL (RBD_FEATURE_LAYERING | \
122 RBD_FEATURE_STRIPINGV2 | \
123 RBD_FEATURE_EXCLUSIVE_LOCK | \
124 RBD_FEATURE_DATA_POOL | \
125 RBD_FEATURE_OPERATIONS)
127 /* Features supported by this (client software) implementation. */
129 #define RBD_FEATURES_SUPPORTED (RBD_FEATURES_ALL)
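/*
 * Hedged standalone illustration of how an image's feature bitmask can
 * be checked against the client-side mask above: any bit outside
 * RBD_FEATURES_SUPPORTED makes the image unusable by this client.  The
 * check_features() helper below is illustrative, not a driver function.
 */
#if 0 /* standalone userspace sketch -- never built with the driver */
#include <stdint.h>
#include <stdio.h>

#define RBD_FEATURE_LAYERING		(1ULL << 0)
#define RBD_FEATURE_STRIPINGV2		(1ULL << 1)
#define RBD_FEATURE_EXCLUSIVE_LOCK	(1ULL << 2)
#define RBD_FEATURE_DATA_POOL		(1ULL << 7)
#define RBD_FEATURE_OPERATIONS		(1ULL << 8)

#define RBD_FEATURES_SUPPORTED	(RBD_FEATURE_LAYERING |		\
				 RBD_FEATURE_STRIPINGV2 |	\
				 RBD_FEATURE_EXCLUSIVE_LOCK |	\
				 RBD_FEATURE_DATA_POOL |	\
				 RBD_FEATURE_OPERATIONS)

/* Return 0 if all bits in @features are understood, -1 otherwise. */
static int check_features(uint64_t features)
{
	uint64_t unsupported = features & ~RBD_FEATURES_SUPPORTED;

	if (unsupported) {
		fprintf(stderr, "unsupported features 0x%llx\n",
			(unsigned long long)unsupported);
		return -1;
	}
	return 0;
}

int main(void)
{
	/* layering + exclusive-lock: fine */
	printf("%d\n", check_features(RBD_FEATURE_LAYERING |
				      RBD_FEATURE_EXCLUSIVE_LOCK));
	/* an unknown future bit: rejected */
	printf("%d\n", check_features(1ULL << 20));
	return 0;
}
#endif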
/*
 * An RBD device name will be "rbd#", where the "rbd" comes from
 * RBD_DRV_NAME above, and # is a unique integer identifier.
 */
135 #define DEV_NAME_LEN 32
138 * block device image metadata (in-memory version)
140 struct rbd_image_header {
141 /* These six fields never change for a given rbd image */
147 u64 features; /* Might be changeable someday? */
149 /* The remaining fields need to be updated occasionally */
151 struct ceph_snap_context *snapc;
152 char *snap_names; /* format 1 only */
153 u64 *snap_sizes; /* format 1 only */
/*
 * An rbd image specification.
 *
 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
 * identify an image.  Each rbd_dev structure includes a pointer to
 * an rbd_spec structure that encapsulates this identity.
 *
 * Each of the id's in an rbd_spec has an associated name.  For a
 * user-mapped image, the names are supplied and the id's associated
 * with them are looked up.  For a layered image, a parent image is
 * defined by the tuple, and the names are looked up.
 *
 * An rbd_dev structure contains a parent_spec pointer which is
 * non-null if the image it represents is a child in a layered
 * image.  This pointer will refer to the rbd_spec structure used
 * by the parent rbd_dev for its own identity (i.e., the structure
 * is shared between the parent and child).
 *
 * Since these structures are populated once, during the discovery
 * phase of image construction, they are effectively immutable so
 * we make no effort to synchronize access to them.
 *
 * Note that code herein does not assume the image name is known (it
 * could be a null pointer).
 */
183 const char *pool_name;
185 const char *image_id;
186 const char *image_name;
189 const char *snap_name;
195 * an instance of the client. multiple devices may share an rbd client.
198 struct ceph_client *client;
200 struct list_head node;
203 struct rbd_img_request;
205 enum obj_request_type {
206 OBJ_REQUEST_NODATA = 1,
207 OBJ_REQUEST_BIO, /* pointer into provided bio (list) */
208 OBJ_REQUEST_BVECS, /* pointer into provided bio_vec array */
209 OBJ_REQUEST_OWN_BVECS, /* private bio_vec array, doesn't own pages */
212 enum obj_operation_type {
/*
 * Writes go through the following state machine to deal with layering:
 * a guarded write starts in RBD_OBJ_WRITE_GUARD; if the target object
 * turns out not to exist it moves to RBD_OBJ_WRITE_COPYUP (the parent
 * data is read and a copyup is issued) and then back to
 * RBD_OBJ_WRITE_GUARD before completing.
 *
 * Writes start in RBD_OBJ_WRITE_GUARD or _FLAT, depending on whether
 * there is a parent or not.
 */
234 enum rbd_obj_write_state {
235 RBD_OBJ_WRITE_FLAT = 1,
237 RBD_OBJ_WRITE_COPYUP,
240 struct rbd_obj_request {
241 struct ceph_object_extent ex;
243 bool tried_parent; /* for reads */
244 enum rbd_obj_write_state write_state; /* for writes */
247 struct rbd_img_request *img_request;
248 struct ceph_file_extent *img_extents;
252 struct ceph_bio_iter bio_pos;
254 struct ceph_bvec_iter bvec_pos;
259 struct bio_vec *copyup_bvecs;
260 u32 copyup_bvec_count;
262 struct ceph_osd_request *osd_req;
264 u64 xferred; /* bytes transferred */
271 IMG_REQ_CHILD, /* initiator: block = 0, child image = 1 */
272 IMG_REQ_LAYERED, /* ENOENT handling: normal = 0, layered = 1 */
275 struct rbd_img_request {
276 struct rbd_device *rbd_dev;
277 enum obj_operation_type op_type;
278 enum obj_request_type data_type;
281 u64 snap_id; /* for reads */
282 struct ceph_snap_context *snapc; /* for writes */
285 struct request *rq; /* block request */
286 struct rbd_obj_request *obj_request; /* obj req initiator */
288 spinlock_t completion_lock;
289 u64 xferred;/* aggregate bytes transferred */
290 int result; /* first nonzero obj_request result */
292 struct list_head object_extents; /* obj_req.ex structs */
293 u32 obj_request_count;
299 #define for_each_obj_request(ireq, oreq) \
300 list_for_each_entry(oreq, &(ireq)->object_extents, ex.oe_item)
301 #define for_each_obj_request_safe(ireq, oreq, n) \
302 list_for_each_entry_safe(oreq, n, &(ireq)->object_extents, ex.oe_item)
304 enum rbd_watch_state {
305 RBD_WATCH_STATE_UNREGISTERED,
306 RBD_WATCH_STATE_REGISTERED,
307 RBD_WATCH_STATE_ERROR,
310 enum rbd_lock_state {
311 RBD_LOCK_STATE_UNLOCKED,
312 RBD_LOCK_STATE_LOCKED,
313 RBD_LOCK_STATE_RELEASING,
316 /* WatchNotify::ClientId */
317 struct rbd_client_id {
331 int dev_id; /* blkdev unique id */
333 int major; /* blkdev assigned major */
335 struct gendisk *disk; /* blkdev's gendisk and rq */
337 u32 image_format; /* Either 1 or 2 */
338 struct rbd_client *rbd_client;
340 char name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */
342 spinlock_t lock; /* queue, flags, open_count */
344 struct rbd_image_header header;
345 unsigned long flags; /* possibly lock protected */
346 struct rbd_spec *spec;
347 struct rbd_options *opts;
348 char *config_info; /* add{,_single_major} string */
350 struct ceph_object_id header_oid;
351 struct ceph_object_locator header_oloc;
353 struct ceph_file_layout layout; /* used for all rbd requests */
355 struct mutex watch_mutex;
356 enum rbd_watch_state watch_state;
357 struct ceph_osd_linger_request *watch_handle;
359 struct delayed_work watch_dwork;
361 struct rw_semaphore lock_rwsem;
362 enum rbd_lock_state lock_state;
363 char lock_cookie[32];
364 struct rbd_client_id owner_cid;
365 struct work_struct acquired_lock_work;
366 struct work_struct released_lock_work;
367 struct delayed_work lock_dwork;
368 struct work_struct unlock_work;
369 wait_queue_head_t lock_waitq;
371 struct workqueue_struct *task_wq;
373 struct rbd_spec *parent_spec;
376 struct rbd_device *parent;
378 /* Block layer tags. */
379 struct blk_mq_tag_set tag_set;
381 /* protects updating the header */
382 struct rw_semaphore header_rwsem;
384 struct rbd_mapping mapping;
386 struct list_head node;
390 unsigned long open_count; /* protected by lock */
/*
 * Flag bits for rbd_dev->flags:
 * - REMOVING (which is coupled with rbd_dev->open_count) is protected
 *   by rbd_dev->lock
 * - BLACKLISTED is protected by rbd_dev->lock_rwsem
 */
400 RBD_DEV_FLAG_EXISTS, /* mapped snapshot has not been deleted */
401 RBD_DEV_FLAG_REMOVING, /* this mapping is being removed */
402 RBD_DEV_FLAG_BLACKLISTED, /* our ceph_client is blacklisted */
405 static DEFINE_MUTEX(client_mutex); /* Serialize client creation */
407 static LIST_HEAD(rbd_dev_list); /* devices */
408 static DEFINE_SPINLOCK(rbd_dev_list_lock);
410 static LIST_HEAD(rbd_client_list); /* clients */
411 static DEFINE_SPINLOCK(rbd_client_list_lock);
413 /* Slab caches for frequently-allocated structures */
415 static struct kmem_cache *rbd_img_request_cache;
416 static struct kmem_cache *rbd_obj_request_cache;
418 static int rbd_major;
419 static DEFINE_IDA(rbd_dev_id_ida);
421 static struct workqueue_struct *rbd_wq;
/*
 * single-major requires >= 0.75 version of userspace rbd utility.
 */
426 static bool single_major = true;
427 module_param(single_major, bool, S_IRUGO);
428 MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: true)");
430 static ssize_t rbd_add(struct bus_type *bus, const char *buf,
432 static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
434 static ssize_t rbd_add_single_major(struct bus_type *bus, const char *buf,
436 static ssize_t rbd_remove_single_major(struct bus_type *bus, const char *buf,
438 static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth);
440 static int rbd_dev_id_to_minor(int dev_id)
442 return dev_id << RBD_SINGLE_MAJOR_PART_SHIFT;
445 static int minor_to_rbd_dev_id(int minor)
447 return minor >> RBD_SINGLE_MAJOR_PART_SHIFT;
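/*
 * Standalone sketch of the dev_id <-> minor round trip above: in
 * single-major mode each mapping gets 2^RBD_SINGLE_MAJOR_PART_SHIFT
 * (16) minors for the whole device plus its partitions.
 */
#if 0 /* standalone userspace sketch -- never built with the driver */
#include <stdio.h>

#define RBD_SINGLE_MAJOR_PART_SHIFT 4

static int rbd_dev_id_to_minor(int dev_id)
{
	return dev_id << RBD_SINGLE_MAJOR_PART_SHIFT;
}

static int minor_to_rbd_dev_id(int minor)
{
	return minor >> RBD_SINGLE_MAJOR_PART_SHIFT;
}

int main(void)
{
	int dev_id;

	for (dev_id = 0; dev_id < 4; dev_id++) {
		int first_minor = rbd_dev_id_to_minor(dev_id);

		/* rbd0 gets minors 0..15, rbd1 gets 16..31, and so on */
		printf("rbd%d: minors %d..%d, back to id %d\n",
		       dev_id, first_minor,
		       first_minor + (1 << RBD_SINGLE_MAJOR_PART_SHIFT) - 1,
		       minor_to_rbd_dev_id(first_minor));
	}
	return 0;
}
#endif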
450 static bool __rbd_is_lock_owner(struct rbd_device *rbd_dev)
452 return rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED ||
453 rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING;
456 static bool rbd_is_lock_owner(struct rbd_device *rbd_dev)
460 down_read(&rbd_dev->lock_rwsem);
461 is_lock_owner = __rbd_is_lock_owner(rbd_dev);
462 up_read(&rbd_dev->lock_rwsem);
463 return is_lock_owner;
466 static ssize_t rbd_supported_features_show(struct bus_type *bus, char *buf)
468 return sprintf(buf, "0x%llx\n", RBD_FEATURES_SUPPORTED);
471 static BUS_ATTR(add, S_IWUSR, NULL, rbd_add);
472 static BUS_ATTR(remove, S_IWUSR, NULL, rbd_remove);
473 static BUS_ATTR(add_single_major, S_IWUSR, NULL, rbd_add_single_major);
474 static BUS_ATTR(remove_single_major, S_IWUSR, NULL, rbd_remove_single_major);
475 static BUS_ATTR(supported_features, S_IRUGO, rbd_supported_features_show, NULL);
477 static struct attribute *rbd_bus_attrs[] = {
479 &bus_attr_remove.attr,
480 &bus_attr_add_single_major.attr,
481 &bus_attr_remove_single_major.attr,
482 &bus_attr_supported_features.attr,
486 static umode_t rbd_bus_is_visible(struct kobject *kobj,
487 struct attribute *attr, int index)
490 (attr == &bus_attr_add_single_major.attr ||
491 attr == &bus_attr_remove_single_major.attr))
497 static const struct attribute_group rbd_bus_group = {
498 .attrs = rbd_bus_attrs,
499 .is_visible = rbd_bus_is_visible,
501 __ATTRIBUTE_GROUPS(rbd_bus);
503 static struct bus_type rbd_bus_type = {
505 .bus_groups = rbd_bus_groups,
508 static void rbd_root_dev_release(struct device *dev)
512 static struct device rbd_root_dev = {
514 .release = rbd_root_dev_release,
517 static __printf(2, 3)
518 void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
520 struct va_format vaf;
528 printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
529 else if (rbd_dev->disk)
530 printk(KERN_WARNING "%s: %s: %pV\n",
531 RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
532 else if (rbd_dev->spec && rbd_dev->spec->image_name)
533 printk(KERN_WARNING "%s: image %s: %pV\n",
534 RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
535 else if (rbd_dev->spec && rbd_dev->spec->image_id)
536 printk(KERN_WARNING "%s: id %s: %pV\n",
537 RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
539 printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
540 RBD_DRV_NAME, rbd_dev, &vaf);
545 #define rbd_assert(expr) \
546 if (unlikely(!(expr))) { \
547 printk(KERN_ERR "\nAssertion failure in %s() " \
549 "\trbd_assert(%s);\n\n", \
550 __func__, __LINE__, #expr); \
553 #else /* !RBD_DEBUG */
554 # define rbd_assert(expr) ((void) 0)
555 #endif /* !RBD_DEBUG */
557 static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);
559 static int rbd_dev_refresh(struct rbd_device *rbd_dev);
560 static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
561 static int rbd_dev_header_info(struct rbd_device *rbd_dev);
562 static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev);
563 static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
565 static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
566 u8 *order, u64 *snap_size);
567 static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
570 static int rbd_open(struct block_device *bdev, fmode_t mode)
572 struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
573 bool removing = false;
575 spin_lock_irq(&rbd_dev->lock);
576 if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
579 rbd_dev->open_count++;
580 spin_unlock_irq(&rbd_dev->lock);
584 (void) get_device(&rbd_dev->dev);
589 static void rbd_release(struct gendisk *disk, fmode_t mode)
591 struct rbd_device *rbd_dev = disk->private_data;
592 unsigned long open_count_before;
594 spin_lock_irq(&rbd_dev->lock);
595 open_count_before = rbd_dev->open_count--;
596 spin_unlock_irq(&rbd_dev->lock);
597 rbd_assert(open_count_before > 0);
599 put_device(&rbd_dev->dev);
602 static int rbd_ioctl_set_ro(struct rbd_device *rbd_dev, unsigned long arg)
606 if (get_user(ro, (int __user *)arg))
609 /* Snapshots can't be marked read-write */
610 if (rbd_dev->spec->snap_id != CEPH_NOSNAP && !ro)
613 /* Let blkdev_roset() handle it */
617 static int rbd_ioctl(struct block_device *bdev, fmode_t mode,
618 unsigned int cmd, unsigned long arg)
620 struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
625 ret = rbd_ioctl_set_ro(rbd_dev, arg);
635 static int rbd_compat_ioctl(struct block_device *bdev, fmode_t mode,
636 unsigned int cmd, unsigned long arg)
638 return rbd_ioctl(bdev, mode, cmd, arg);
640 #endif /* CONFIG_COMPAT */
642 static const struct block_device_operations rbd_bd_ops = {
643 .owner = THIS_MODULE,
645 .release = rbd_release,
648 .compat_ioctl = rbd_compat_ioctl,
653 * Initialize an rbd client instance. Success or not, this function
654 * consumes ceph_opts. Caller holds client_mutex.
656 static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
658 struct rbd_client *rbdc;
661 dout("%s:\n", __func__);
662 rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
666 kref_init(&rbdc->kref);
667 INIT_LIST_HEAD(&rbdc->node);
669 rbdc->client = ceph_create_client(ceph_opts, rbdc);
670 if (IS_ERR(rbdc->client))
672 ceph_opts = NULL; /* Now rbdc->client is responsible for ceph_opts */
674 ret = ceph_open_session(rbdc->client);
678 spin_lock(&rbd_client_list_lock);
679 list_add_tail(&rbdc->node, &rbd_client_list);
680 spin_unlock(&rbd_client_list_lock);
682 dout("%s: rbdc %p\n", __func__, rbdc);
686 ceph_destroy_client(rbdc->client);
691 ceph_destroy_options(ceph_opts);
692 dout("%s: error %d\n", __func__, ret);
697 static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
699 kref_get(&rbdc->kref);
705 * Find a ceph client with specific addr and configuration. If
706 * found, bump its reference count.
708 static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
710 struct rbd_client *client_node;
713 if (ceph_opts->flags & CEPH_OPT_NOSHARE)
716 spin_lock(&rbd_client_list_lock);
717 list_for_each_entry(client_node, &rbd_client_list, node) {
718 if (!ceph_compare_options(ceph_opts, client_node->client)) {
719 __rbd_get_client(client_node);
725 spin_unlock(&rbd_client_list_lock);
727 return found ? client_node : NULL;
731 * (Per device) rbd map options
739 /* string args above */
748 static match_table_t rbd_opts_tokens = {
749 {Opt_queue_depth, "queue_depth=%d"},
750 {Opt_lock_timeout, "lock_timeout=%d"},
752 /* string args above */
753 {Opt_read_only, "read_only"},
754 {Opt_read_only, "ro"}, /* Alternate spelling */
755 {Opt_read_write, "read_write"},
756 {Opt_read_write, "rw"}, /* Alternate spelling */
757 {Opt_lock_on_read, "lock_on_read"},
758 {Opt_exclusive, "exclusive"},
759 {Opt_notrim, "notrim"},
765 unsigned long lock_timeout;
772 #define RBD_QUEUE_DEPTH_DEFAULT BLKDEV_MAX_RQ
773 #define RBD_LOCK_TIMEOUT_DEFAULT 0 /* no timeout */
774 #define RBD_READ_ONLY_DEFAULT false
775 #define RBD_LOCK_ON_READ_DEFAULT false
776 #define RBD_EXCLUSIVE_DEFAULT false
777 #define RBD_TRIM_DEFAULT true
779 static int parse_rbd_opts_token(char *c, void *private)
781 struct rbd_options *rbd_opts = private;
782 substring_t argstr[MAX_OPT_ARGS];
783 int token, intval, ret;
785 token = match_token(c, rbd_opts_tokens, argstr);
786 if (token < Opt_last_int) {
787 ret = match_int(&argstr[0], &intval);
789 pr_err("bad mount option arg (not int) at '%s'\n", c);
792 dout("got int token %d val %d\n", token, intval);
793 } else if (token > Opt_last_int && token < Opt_last_string) {
794 dout("got string token %d val %s\n", token, argstr[0].from);
796 dout("got token %d\n", token);
800 case Opt_queue_depth:
802 pr_err("queue_depth out of range\n");
805 rbd_opts->queue_depth = intval;
807 case Opt_lock_timeout:
808 /* 0 is "wait forever" (i.e. infinite timeout) */
809 if (intval < 0 || intval > INT_MAX / 1000) {
810 pr_err("lock_timeout out of range\n");
813 rbd_opts->lock_timeout = msecs_to_jiffies(intval * 1000);
816 rbd_opts->read_only = true;
819 rbd_opts->read_only = false;
821 case Opt_lock_on_read:
822 rbd_opts->lock_on_read = true;
825 rbd_opts->exclusive = true;
828 rbd_opts->trim = false;
831 /* libceph prints "bad option" msg */
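/*
 * For illustration only: a simplified userspace tokenizer that mimics
 * what the match_token()-based parser above does for a map-options
 * string such as "queue_depth=256,lock_on_read,ro".  The kernel parser
 * uses <linux/parser.h>; nothing below is driver code and the struct
 * below is a made-up stand-in for struct rbd_options.
 */
#if 0 /* standalone userspace sketch -- never built with the driver */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct rbd_opts_sketch {
	int queue_depth;
	bool read_only;
	bool lock_on_read;
	bool trim;
};

/* Parse a comma-separated option string into the sketch structure. */
static int parse_opts(char *s, struct rbd_opts_sketch *o)
{
	char *tok;

	for (tok = strtok(s, ","); tok; tok = strtok(NULL, ",")) {
		if (sscanf(tok, "queue_depth=%d", &o->queue_depth) == 1)
			continue;
		if (!strcmp(tok, "read_only") || !strcmp(tok, "ro"))
			o->read_only = true;
		else if (!strcmp(tok, "read_write") || !strcmp(tok, "rw"))
			o->read_only = false;
		else if (!strcmp(tok, "lock_on_read"))
			o->lock_on_read = true;
		else if (!strcmp(tok, "notrim"))
			o->trim = false;
		else
			return -1;	/* unknown option */
	}
	return 0;
}

int main(void)
{
	struct rbd_opts_sketch o = { .queue_depth = 128, .trim = true };
	char buf[] = "queue_depth=256,lock_on_read,ro";

	if (parse_opts(buf, &o) == 0)
		printf("depth=%d ro=%d lock_on_read=%d trim=%d\n",
		       o.queue_depth, o.read_only, o.lock_on_read, o.trim);
	return 0;
}
#endif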
838 static char* obj_op_name(enum obj_operation_type op_type)
/*
 * Destroy ceph client
 *
 * rbd_client_list_lock is taken by this function itself, so the
 * caller must not hold it.
 */
857 static void rbd_client_release(struct kref *kref)
859 struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);
861 dout("%s: rbdc %p\n", __func__, rbdc);
862 spin_lock(&rbd_client_list_lock);
863 list_del(&rbdc->node);
864 spin_unlock(&rbd_client_list_lock);
866 ceph_destroy_client(rbdc->client);
/*
 * Drop a reference to a ceph client node.  If it's not referenced
 * anymore, release it.
 */
874 static void rbd_put_client(struct rbd_client *rbdc)
877 kref_put(&rbdc->kref, rbd_client_release);
880 static int wait_for_latest_osdmap(struct ceph_client *client)
885 ret = ceph_monc_get_version(&client->monc, "osdmap", &newest_epoch);
889 if (client->osdc.osdmap->epoch >= newest_epoch)
892 ceph_osdc_maybe_request_map(&client->osdc);
893 return ceph_monc_wait_osdmap(&client->monc, newest_epoch,
894 client->options->mount_timeout);
/*
 * Get a ceph client with specific addr and configuration, if one does
 * not exist create it.  Either way, ceph_opts is consumed by this
 * function.
 */
902 static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
904 struct rbd_client *rbdc;
907 mutex_lock_nested(&client_mutex, SINGLE_DEPTH_NESTING);
908 rbdc = rbd_client_find(ceph_opts);
910 ceph_destroy_options(ceph_opts);
913 * Using an existing client. Make sure ->pg_pools is up to
914 * date before we look up the pool id in do_rbd_add().
916 ret = wait_for_latest_osdmap(rbdc->client);
918 rbd_warn(NULL, "failed to get latest osdmap: %d", ret);
919 rbd_put_client(rbdc);
923 rbdc = rbd_client_create(ceph_opts);
925 mutex_unlock(&client_mutex);
930 static bool rbd_image_format_valid(u32 image_format)
932 return image_format == 1 || image_format == 2;
935 static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
940 /* The header has to start with the magic rbd header text */
941 if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
944 /* The bio layer requires at least sector-sized I/O */
946 if (ondisk->options.order < SECTOR_SHIFT)
949 /* If we use u64 in a few spots we may be able to loosen this */
951 if (ondisk->options.order > 8 * sizeof (int) - 1)
955 * The size of a snapshot header has to fit in a size_t, and
956 * that limits the number of snapshots.
958 snap_count = le32_to_cpu(ondisk->snap_count);
959 size = SIZE_MAX - sizeof (struct ceph_snap_context);
960 if (snap_count > size / sizeof (__le64))
/*
 * Not only that, but the size of the entire snapshot
 * header must also be representable in a size_t.
 */
967 size -= snap_count * sizeof (__le64);
968 if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
975 * returns the size of an object in the image
977 static u32 rbd_obj_bytes(struct rbd_image_header *header)
979 return 1U << header->obj_order;
982 static void rbd_init_layout(struct rbd_device *rbd_dev)
984 if (rbd_dev->header.stripe_unit == 0 ||
985 rbd_dev->header.stripe_count == 0) {
986 rbd_dev->header.stripe_unit = rbd_obj_bytes(&rbd_dev->header);
987 rbd_dev->header.stripe_count = 1;
990 rbd_dev->layout.stripe_unit = rbd_dev->header.stripe_unit;
991 rbd_dev->layout.stripe_count = rbd_dev->header.stripe_count;
992 rbd_dev->layout.object_size = rbd_obj_bytes(&rbd_dev->header);
993 rbd_dev->layout.pool_id = rbd_dev->header.data_pool_id == CEPH_NOPOOL ?
994 rbd_dev->spec->pool_id : rbd_dev->header.data_pool_id;
995 RCU_INIT_POINTER(rbd_dev->layout.pool_ns, NULL);
/*
 * Fill an rbd image header with information from the given format 1
 * on-disk header.
 */
1002 static int rbd_header_from_disk(struct rbd_device *rbd_dev,
1003 struct rbd_image_header_ondisk *ondisk)
1005 struct rbd_image_header *header = &rbd_dev->header;
1006 bool first_time = header->object_prefix == NULL;
1007 struct ceph_snap_context *snapc;
1008 char *object_prefix = NULL;
1009 char *snap_names = NULL;
1010 u64 *snap_sizes = NULL;
1015 /* Allocate this now to avoid having to handle failure below */
1018 object_prefix = kstrndup(ondisk->object_prefix,
1019 sizeof(ondisk->object_prefix),
1025 /* Allocate the snapshot context and fill it in */
1027 snap_count = le32_to_cpu(ondisk->snap_count);
1028 snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
1031 snapc->seq = le64_to_cpu(ondisk->snap_seq);
1033 struct rbd_image_snap_ondisk *snaps;
1034 u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);
1036 /* We'll keep a copy of the snapshot names... */
1038 if (snap_names_len > (u64)SIZE_MAX)
1040 snap_names = kmalloc(snap_names_len, GFP_KERNEL);
1044 /* ...as well as the array of their sizes. */
1045 snap_sizes = kmalloc_array(snap_count,
1046 sizeof(*header->snap_sizes),
/*
 * Copy the names, and fill in each snapshot's id and size.
 *
 * Note that rbd_dev_v1_header_info() guarantees the ondisk
 * buffer we're working with has snap_names_len bytes beyond
 * the end of the snapshot id array, so this memcpy() is safe.
 */
1060 memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
1061 snaps = ondisk->snaps;
1062 for (i = 0; i < snap_count; i++) {
1063 snapc->snaps[i] = le64_to_cpu(snaps[i].id);
1064 snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
1068 /* We won't fail any more, fill in the header */
1071 header->object_prefix = object_prefix;
1072 header->obj_order = ondisk->options.order;
1073 rbd_init_layout(rbd_dev);
1075 ceph_put_snap_context(header->snapc);
1076 kfree(header->snap_names);
1077 kfree(header->snap_sizes);
1080 /* The remaining fields always get updated (when we refresh) */
1082 header->image_size = le64_to_cpu(ondisk->image_size);
1083 header->snapc = snapc;
1084 header->snap_names = snap_names;
1085 header->snap_sizes = snap_sizes;
1093 ceph_put_snap_context(snapc);
1094 kfree(object_prefix);
1099 static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
1101 const char *snap_name;
1103 rbd_assert(which < rbd_dev->header.snapc->num_snaps);
1105 /* Skip over names until we find the one we are looking for */
1107 snap_name = rbd_dev->header.snap_names;
1109 snap_name += strlen(snap_name) + 1;
1111 return kstrdup(snap_name, GFP_KERNEL);
1115 * Snapshot id comparison function for use with qsort()/bsearch().
1116 * Note that result is for snapshots in *descending* order.
1118 static int snapid_compare_reverse(const void *s1, const void *s2)
1120 u64 snap_id1 = *(u64 *)s1;
1121 u64 snap_id2 = *(u64 *)s2;
1123 if (snap_id1 < snap_id2)
1125 return snap_id1 == snap_id2 ? 0 : -1;
/*
 * Search a snapshot context to see if the given snapshot id is
 * present.
 *
 * Returns the position of the snapshot id in the array if it's found,
 * or BAD_SNAP_INDEX otherwise.
 *
 * Note: The snapshot array is kept sorted (by the osd) in
 * reverse order, highest snapshot id first.
 */
1138 static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
1140 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
1143 found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
1144 sizeof (snap_id), snapid_compare_reverse);
1146 return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
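/*
 * Self-contained userspace example of the same pattern: bsearch() over
 * an array kept in descending order, using a reversed comparator like
 * snapid_compare_reverse() above.  The sample snapshot ids are made up.
 */
#if 0 /* standalone userspace sketch -- never built with the driver */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Comparator for snapshots stored highest-id-first. */
static int snapid_compare_reverse(const void *s1, const void *s2)
{
	uint64_t a = *(const uint64_t *)s1;
	uint64_t b = *(const uint64_t *)s2;

	if (a < b)
		return 1;
	return a == b ? 0 : -1;
}

int main(void)
{
	/* descending, as the OSD keeps the snapshot context */
	uint64_t snaps[] = { 42, 17, 9, 3 };
	uint64_t key = 9;
	uint64_t *found;

	found = bsearch(&key, snaps, sizeof(snaps) / sizeof(snaps[0]),
			sizeof(snaps[0]), snapid_compare_reverse);
	if (found)
		printf("snap %llu found at index %td\n",
		       (unsigned long long)key, found - snaps);
	else
		printf("snap %llu not found\n", (unsigned long long)key);
	return 0;
}
#endif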
1149 static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
1153 const char *snap_name;
1155 which = rbd_dev_snap_index(rbd_dev, snap_id);
1156 if (which == BAD_SNAP_INDEX)
1157 return ERR_PTR(-ENOENT);
1159 snap_name = _rbd_dev_v1_snap_name(rbd_dev, which);
1160 return snap_name ? snap_name : ERR_PTR(-ENOMEM);
1163 static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
1165 if (snap_id == CEPH_NOSNAP)
1166 return RBD_SNAP_HEAD_NAME;
1168 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
1169 if (rbd_dev->image_format == 1)
1170 return rbd_dev_v1_snap_name(rbd_dev, snap_id);
1172 return rbd_dev_v2_snap_name(rbd_dev, snap_id);
1175 static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
1178 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
1179 if (snap_id == CEPH_NOSNAP) {
1180 *snap_size = rbd_dev->header.image_size;
1181 } else if (rbd_dev->image_format == 1) {
1184 which = rbd_dev_snap_index(rbd_dev, snap_id);
1185 if (which == BAD_SNAP_INDEX)
1188 *snap_size = rbd_dev->header.snap_sizes[which];
1193 ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
1202 static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
1205 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
1206 if (snap_id == CEPH_NOSNAP) {
1207 *snap_features = rbd_dev->header.features;
1208 } else if (rbd_dev->image_format == 1) {
1209 *snap_features = 0; /* No features for format 1 */
1214 ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, &features);
1218 *snap_features = features;
1223 static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
1225 u64 snap_id = rbd_dev->spec->snap_id;
1230 ret = rbd_snap_size(rbd_dev, snap_id, &size);
1233 ret = rbd_snap_features(rbd_dev, snap_id, &features);
1237 rbd_dev->mapping.size = size;
1238 rbd_dev->mapping.features = features;
1243 static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
1245 rbd_dev->mapping.size = 0;
1246 rbd_dev->mapping.features = 0;
1249 static void zero_bvec(struct bio_vec *bv)
1252 unsigned long flags;
1254 buf = bvec_kmap_irq(bv, &flags);
1255 memset(buf, 0, bv->bv_len);
1256 flush_dcache_page(bv->bv_page);
1257 bvec_kunmap_irq(buf, &flags);
1260 static void zero_bios(struct ceph_bio_iter *bio_pos, u32 off, u32 bytes)
1262 struct ceph_bio_iter it = *bio_pos;
1264 ceph_bio_iter_advance(&it, off);
1265 ceph_bio_iter_advance_step(&it, bytes, ({
1270 static void zero_bvecs(struct ceph_bvec_iter *bvec_pos, u32 off, u32 bytes)
1272 struct ceph_bvec_iter it = *bvec_pos;
1274 ceph_bvec_iter_advance(&it, off);
1275 ceph_bvec_iter_advance_step(&it, bytes, ({
1281 * Zero a range in @obj_req data buffer defined by a bio (list) or
1282 * (private) bio_vec array.
1284 * @off is relative to the start of the data buffer.
1286 static void rbd_obj_zero_range(struct rbd_obj_request *obj_req, u32 off,
1289 switch (obj_req->img_request->data_type) {
1290 case OBJ_REQUEST_BIO:
1291 zero_bios(&obj_req->bio_pos, off, bytes);
1293 case OBJ_REQUEST_BVECS:
1294 case OBJ_REQUEST_OWN_BVECS:
1295 zero_bvecs(&obj_req->bvec_pos, off, bytes);
1302 static void rbd_obj_request_destroy(struct kref *kref);
1303 static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
1305 rbd_assert(obj_request != NULL);
1306 dout("%s: obj %p (was %d)\n", __func__, obj_request,
1307 kref_read(&obj_request->kref));
1308 kref_put(&obj_request->kref, rbd_obj_request_destroy);
1311 static void rbd_img_request_get(struct rbd_img_request *img_request)
1313 dout("%s: img %p (was %d)\n", __func__, img_request,
1314 kref_read(&img_request->kref));
1315 kref_get(&img_request->kref);
1318 static void rbd_img_request_destroy(struct kref *kref);
1319 static void rbd_img_request_put(struct rbd_img_request *img_request)
1321 rbd_assert(img_request != NULL);
1322 dout("%s: img %p (was %d)\n", __func__, img_request,
1323 kref_read(&img_request->kref));
1324 kref_put(&img_request->kref, rbd_img_request_destroy);
1327 static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
1328 struct rbd_obj_request *obj_request)
1330 rbd_assert(obj_request->img_request == NULL);
1332 /* Image request now owns object's original reference */
1333 obj_request->img_request = img_request;
1334 img_request->obj_request_count++;
1335 img_request->pending_count++;
1336 dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
1339 static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
1340 struct rbd_obj_request *obj_request)
1342 dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
1343 list_del(&obj_request->ex.oe_item);
1344 rbd_assert(img_request->obj_request_count > 0);
1345 img_request->obj_request_count--;
1346 rbd_assert(obj_request->img_request == img_request);
1347 rbd_obj_request_put(obj_request);
1350 static void rbd_obj_request_submit(struct rbd_obj_request *obj_request)
1352 struct ceph_osd_request *osd_req = obj_request->osd_req;
1354 dout("%s %p object_no %016llx %llu~%llu osd_req %p\n", __func__,
1355 obj_request, obj_request->ex.oe_objno, obj_request->ex.oe_off,
1356 obj_request->ex.oe_len, osd_req);
1357 ceph_osdc_start_request(osd_req->r_osdc, osd_req, false);
/*
 * The default/initial value for all image request flags is 0.  Each
 * is conditionally set to 1 at image request initialization time
 * and currently never changes thereafter.
 */
1365 static void img_request_layered_set(struct rbd_img_request *img_request)
1367 set_bit(IMG_REQ_LAYERED, &img_request->flags);
1371 static void img_request_layered_clear(struct rbd_img_request *img_request)
1373 clear_bit(IMG_REQ_LAYERED, &img_request->flags);
1377 static bool img_request_layered_test(struct rbd_img_request *img_request)
1380 return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
1383 static bool rbd_obj_is_entire(struct rbd_obj_request *obj_req)
1385 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
1387 return !obj_req->ex.oe_off &&
1388 obj_req->ex.oe_len == rbd_dev->layout.object_size;
1391 static bool rbd_obj_is_tail(struct rbd_obj_request *obj_req)
1393 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
1395 return obj_req->ex.oe_off + obj_req->ex.oe_len ==
1396 rbd_dev->layout.object_size;
1399 static u64 rbd_obj_img_extents_bytes(struct rbd_obj_request *obj_req)
1401 return ceph_file_extents_bytes(obj_req->img_extents,
1402 obj_req->num_img_extents);
1405 static bool rbd_img_is_write(struct rbd_img_request *img_req)
1407 switch (img_req->op_type) {
1411 case OBJ_OP_DISCARD:
1418 static void rbd_obj_handle_request(struct rbd_obj_request *obj_req);
1420 static void rbd_osd_req_callback(struct ceph_osd_request *osd_req)
1422 struct rbd_obj_request *obj_req = osd_req->r_priv;
1424 dout("%s osd_req %p result %d for obj_req %p\n", __func__, osd_req,
1425 osd_req->r_result, obj_req);
1426 rbd_assert(osd_req == obj_req->osd_req);
1428 obj_req->result = osd_req->r_result < 0 ? osd_req->r_result : 0;
1429 if (!obj_req->result && !rbd_img_is_write(obj_req->img_request))
1430 obj_req->xferred = osd_req->r_result;
1433 * Writes aren't allowed to return a data payload. In some
1434 * guarded write cases (e.g. stat + zero on an empty object)
1435 * a stat response makes it through, but we don't care.
1437 obj_req->xferred = 0;
1439 rbd_obj_handle_request(obj_req);
1442 static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request)
1444 struct ceph_osd_request *osd_req = obj_request->osd_req;
1446 osd_req->r_flags = CEPH_OSD_FLAG_READ;
1447 osd_req->r_snapid = obj_request->img_request->snap_id;
1450 static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
1452 struct ceph_osd_request *osd_req = obj_request->osd_req;
1454 osd_req->r_flags = CEPH_OSD_FLAG_WRITE;
1455 ktime_get_real_ts(&osd_req->r_mtime);
1456 osd_req->r_data_offset = obj_request->ex.oe_off;
1459 static struct ceph_osd_request *
1460 rbd_osd_req_create(struct rbd_obj_request *obj_req, unsigned int num_ops)
1462 struct rbd_img_request *img_req = obj_req->img_request;
1463 struct rbd_device *rbd_dev = img_req->rbd_dev;
1464 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
1465 struct ceph_osd_request *req;
1466 const char *name_format = rbd_dev->image_format == 1 ?
1467 RBD_V1_DATA_FORMAT : RBD_V2_DATA_FORMAT;
1469 req = ceph_osdc_alloc_request(osdc,
1470 (rbd_img_is_write(img_req) ? img_req->snapc : NULL),
1471 num_ops, false, GFP_NOIO);
1475 req->r_callback = rbd_osd_req_callback;
1476 req->r_priv = obj_req;
1478 req->r_base_oloc.pool = rbd_dev->layout.pool_id;
1479 if (ceph_oid_aprintf(&req->r_base_oid, GFP_NOIO, name_format,
1480 rbd_dev->header.object_prefix, obj_req->ex.oe_objno))
1483 if (ceph_osdc_alloc_messages(req, GFP_NOIO))
1489 ceph_osdc_put_request(req);
1493 static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
1495 ceph_osdc_put_request(osd_req);
1498 static struct rbd_obj_request *rbd_obj_request_create(void)
1500 struct rbd_obj_request *obj_request;
1502 obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_NOIO);
1506 ceph_object_extent_init(&obj_request->ex);
1507 kref_init(&obj_request->kref);
1509 dout("%s %p\n", __func__, obj_request);
1513 static void rbd_obj_request_destroy(struct kref *kref)
1515 struct rbd_obj_request *obj_request;
1518 obj_request = container_of(kref, struct rbd_obj_request, kref);
1520 dout("%s: obj %p\n", __func__, obj_request);
1522 if (obj_request->osd_req)
1523 rbd_osd_req_destroy(obj_request->osd_req);
1525 switch (obj_request->img_request->data_type) {
1526 case OBJ_REQUEST_NODATA:
1527 case OBJ_REQUEST_BIO:
1528 case OBJ_REQUEST_BVECS:
1529 break; /* Nothing to do */
1530 case OBJ_REQUEST_OWN_BVECS:
1531 kfree(obj_request->bvec_pos.bvecs);
1537 kfree(obj_request->img_extents);
1538 if (obj_request->copyup_bvecs) {
1539 for (i = 0; i < obj_request->copyup_bvec_count; i++) {
1540 if (obj_request->copyup_bvecs[i].bv_page)
1541 __free_page(obj_request->copyup_bvecs[i].bv_page);
1543 kfree(obj_request->copyup_bvecs);
1546 kmem_cache_free(rbd_obj_request_cache, obj_request);
1549 /* It's OK to call this for a device with no parent */
1551 static void rbd_spec_put(struct rbd_spec *spec);
1552 static void rbd_dev_unparent(struct rbd_device *rbd_dev)
1554 rbd_dev_remove_parent(rbd_dev);
1555 rbd_spec_put(rbd_dev->parent_spec);
1556 rbd_dev->parent_spec = NULL;
1557 rbd_dev->parent_overlap = 0;
1561 * Parent image reference counting is used to determine when an
1562 * image's parent fields can be safely torn down--after there are no
1563 * more in-flight requests to the parent image. When the last
1564 * reference is dropped, cleaning them up is safe.
1566 static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
1570 if (!rbd_dev->parent_spec)
1573 counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
1577 /* Last reference; clean up parent data structures */
1580 rbd_dev_unparent(rbd_dev);
1582 rbd_warn(rbd_dev, "parent reference underflow");
/*
 * If an image has a non-zero parent overlap, get a reference to its
 * parent.
 *
 * Returns true if the rbd device has a parent with a non-zero
 * overlap and a reference for it was successfully taken, or
 * false otherwise.
 */
1593 static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
1597 if (!rbd_dev->parent_spec)
1600 down_read(&rbd_dev->header_rwsem);
1601 if (rbd_dev->parent_overlap)
1602 counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
1603 up_read(&rbd_dev->header_rwsem);
1606 rbd_warn(rbd_dev, "parent reference overflow");
1612 * Caller is responsible for filling in the list of object requests
1613 * that comprises the image request, and the Linux request pointer
1614 * (if there is one).
1616 static struct rbd_img_request *rbd_img_request_create(
1617 struct rbd_device *rbd_dev,
1618 enum obj_operation_type op_type,
1619 struct ceph_snap_context *snapc)
1621 struct rbd_img_request *img_request;
1623 img_request = kmem_cache_zalloc(rbd_img_request_cache, GFP_NOIO);
1627 img_request->rbd_dev = rbd_dev;
1628 img_request->op_type = op_type;
1629 if (!rbd_img_is_write(img_request))
1630 img_request->snap_id = rbd_dev->spec->snap_id;
1632 img_request->snapc = snapc;
1634 if (rbd_dev_parent_get(rbd_dev))
1635 img_request_layered_set(img_request);
1637 spin_lock_init(&img_request->completion_lock);
1638 INIT_LIST_HEAD(&img_request->object_extents);
1639 kref_init(&img_request->kref);
1641 dout("%s: rbd_dev %p %s -> img %p\n", __func__, rbd_dev,
1642 obj_op_name(op_type), img_request);
1646 static void rbd_img_request_destroy(struct kref *kref)
1648 struct rbd_img_request *img_request;
1649 struct rbd_obj_request *obj_request;
1650 struct rbd_obj_request *next_obj_request;
1652 img_request = container_of(kref, struct rbd_img_request, kref);
1654 dout("%s: img %p\n", __func__, img_request);
1656 for_each_obj_request_safe(img_request, obj_request, next_obj_request)
1657 rbd_img_obj_request_del(img_request, obj_request);
1658 rbd_assert(img_request->obj_request_count == 0);
1660 if (img_request_layered_test(img_request)) {
1661 img_request_layered_clear(img_request);
1662 rbd_dev_parent_put(img_request->rbd_dev);
1665 if (rbd_img_is_write(img_request))
1666 ceph_put_snap_context(img_request->snapc);
1668 kmem_cache_free(rbd_img_request_cache, img_request);
1671 static void prune_extents(struct ceph_file_extent *img_extents,
1672 u32 *num_img_extents, u64 overlap)
1674 u32 cnt = *num_img_extents;
1676 /* drop extents completely beyond the overlap */
1677 while (cnt && img_extents[cnt - 1].fe_off >= overlap)
1681 struct ceph_file_extent *ex = &img_extents[cnt - 1];
1683 /* trim final overlapping extent */
1684 if (ex->fe_off + ex->fe_len > overlap)
1685 ex->fe_len = overlap - ex->fe_off;
1688 *num_img_extents = cnt;
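/*
 * Hedged standalone sketch of the pruning above: extents starting at or
 * beyond the parent overlap are dropped and the last remaining extent
 * is trimmed to end at the overlap.  The extent type and sample values
 * below are illustrative.
 */
#if 0 /* standalone userspace sketch -- never built with the driver */
#include <stdint.h>
#include <stdio.h>

struct file_extent {
	uint64_t fe_off;
	uint64_t fe_len;
};

static void prune(struct file_extent *ex, uint32_t *num, uint64_t overlap)
{
	uint32_t cnt = *num;

	/* drop extents completely beyond the overlap */
	while (cnt && ex[cnt - 1].fe_off >= overlap)
		cnt--;

	/* trim the final overlapping extent */
	if (cnt && ex[cnt - 1].fe_off + ex[cnt - 1].fe_len > overlap)
		ex[cnt - 1].fe_len = overlap - ex[cnt - 1].fe_off;

	*num = cnt;
}

int main(void)
{
	struct file_extent ex[] = { { 0, 4096 }, { 8192, 4096 }, { 20480, 4096 } };
	uint32_t num = 3;
	uint32_t i;

	prune(ex, &num, 10240);	/* parent overlap of 10 KiB */
	for (i = 0; i < num; i++)
		printf("extent %u: off %llu len %llu\n", i,
		       (unsigned long long)ex[i].fe_off,
		       (unsigned long long)ex[i].fe_len);
	return 0;
}
#endif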
1692 * Determine the byte range(s) covered by either just the object extent
1693 * or the entire object in the parent image.
1695 static int rbd_obj_calc_img_extents(struct rbd_obj_request *obj_req,
1698 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
1701 if (!rbd_dev->parent_overlap)
1704 ret = ceph_extent_to_file(&rbd_dev->layout, obj_req->ex.oe_objno,
1705 entire ? 0 : obj_req->ex.oe_off,
1706 entire ? rbd_dev->layout.object_size :
1708 &obj_req->img_extents,
1709 &obj_req->num_img_extents);
1713 prune_extents(obj_req->img_extents, &obj_req->num_img_extents,
1714 rbd_dev->parent_overlap);
1718 static void rbd_osd_req_setup_data(struct rbd_obj_request *obj_req, u32 which)
1720 switch (obj_req->img_request->data_type) {
1721 case OBJ_REQUEST_BIO:
1722 osd_req_op_extent_osd_data_bio(obj_req->osd_req, which,
1724 obj_req->ex.oe_len);
1726 case OBJ_REQUEST_BVECS:
1727 case OBJ_REQUEST_OWN_BVECS:
1728 rbd_assert(obj_req->bvec_pos.iter.bi_size ==
1729 obj_req->ex.oe_len);
1730 rbd_assert(obj_req->bvec_idx == obj_req->bvec_count);
1731 osd_req_op_extent_osd_data_bvec_pos(obj_req->osd_req, which,
1732 &obj_req->bvec_pos);
1739 static int rbd_obj_setup_read(struct rbd_obj_request *obj_req)
1741 obj_req->osd_req = rbd_osd_req_create(obj_req, 1);
1742 if (!obj_req->osd_req)
1745 osd_req_op_extent_init(obj_req->osd_req, 0, CEPH_OSD_OP_READ,
1746 obj_req->ex.oe_off, obj_req->ex.oe_len, 0, 0);
1747 rbd_osd_req_setup_data(obj_req, 0);
1749 rbd_osd_req_format_read(obj_req);
1753 static int __rbd_obj_setup_stat(struct rbd_obj_request *obj_req,
1756 struct page **pages;
/*
 * The response data for a STAT call consists of:
 *     le64 length;
 *     struct {
 *         le32 tv_sec;
 *         le32 tv_nsec;
 *     } mtime;
 * hence the 8 + sizeof(struct ceph_timespec) buffer length below.
 */
1766 pages = ceph_alloc_page_vector(1, GFP_NOIO);
1768 return PTR_ERR(pages);
1770 osd_req_op_init(obj_req->osd_req, which, CEPH_OSD_OP_STAT, 0);
1771 osd_req_op_raw_data_in_pages(obj_req->osd_req, which, pages,
1772 8 + sizeof(struct ceph_timespec),
1777 static void __rbd_obj_setup_write(struct rbd_obj_request *obj_req,
1780 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
1783 osd_req_op_alloc_hint_init(obj_req->osd_req, which++,
1784 rbd_dev->layout.object_size,
1785 rbd_dev->layout.object_size);
1787 if (rbd_obj_is_entire(obj_req))
1788 opcode = CEPH_OSD_OP_WRITEFULL;
1790 opcode = CEPH_OSD_OP_WRITE;
1792 osd_req_op_extent_init(obj_req->osd_req, which, opcode,
1793 obj_req->ex.oe_off, obj_req->ex.oe_len, 0, 0);
1794 rbd_osd_req_setup_data(obj_req, which++);
1796 rbd_assert(which == obj_req->osd_req->r_num_ops);
1797 rbd_osd_req_format_write(obj_req);
1800 static int rbd_obj_setup_write(struct rbd_obj_request *obj_req)
1802 unsigned int num_osd_ops, which = 0;
1805 /* reverse map the entire object onto the parent */
1806 ret = rbd_obj_calc_img_extents(obj_req, true);
1810 if (obj_req->num_img_extents) {
1811 obj_req->write_state = RBD_OBJ_WRITE_GUARD;
1812 num_osd_ops = 3; /* stat + setallochint + write/writefull */
1814 obj_req->write_state = RBD_OBJ_WRITE_FLAT;
1815 num_osd_ops = 2; /* setallochint + write/writefull */
1818 obj_req->osd_req = rbd_osd_req_create(obj_req, num_osd_ops);
1819 if (!obj_req->osd_req)
1822 if (obj_req->num_img_extents) {
1823 ret = __rbd_obj_setup_stat(obj_req, which++);
1828 __rbd_obj_setup_write(obj_req, which);
1832 static void __rbd_obj_setup_discard(struct rbd_obj_request *obj_req,
1837 if (rbd_obj_is_entire(obj_req)) {
1838 if (obj_req->num_img_extents) {
1839 osd_req_op_init(obj_req->osd_req, which++,
1840 CEPH_OSD_OP_CREATE, 0);
1841 opcode = CEPH_OSD_OP_TRUNCATE;
1843 osd_req_op_init(obj_req->osd_req, which++,
1844 CEPH_OSD_OP_DELETE, 0);
1847 } else if (rbd_obj_is_tail(obj_req)) {
1848 opcode = CEPH_OSD_OP_TRUNCATE;
1850 opcode = CEPH_OSD_OP_ZERO;
1854 osd_req_op_extent_init(obj_req->osd_req, which++, opcode,
1855 obj_req->ex.oe_off, obj_req->ex.oe_len,
1858 rbd_assert(which == obj_req->osd_req->r_num_ops);
1859 rbd_osd_req_format_write(obj_req);
1862 static int rbd_obj_setup_discard(struct rbd_obj_request *obj_req)
1864 unsigned int num_osd_ops, which = 0;
1867 /* reverse map the entire object onto the parent */
1868 ret = rbd_obj_calc_img_extents(obj_req, true);
1872 if (rbd_obj_is_entire(obj_req)) {
1873 obj_req->write_state = RBD_OBJ_WRITE_FLAT;
1874 if (obj_req->num_img_extents)
1875 num_osd_ops = 2; /* create + truncate */
1877 num_osd_ops = 1; /* delete */
1879 if (obj_req->num_img_extents) {
1880 obj_req->write_state = RBD_OBJ_WRITE_GUARD;
1881 num_osd_ops = 2; /* stat + truncate/zero */
1883 obj_req->write_state = RBD_OBJ_WRITE_FLAT;
1884 num_osd_ops = 1; /* truncate/zero */
1888 obj_req->osd_req = rbd_osd_req_create(obj_req, num_osd_ops);
1889 if (!obj_req->osd_req)
1892 if (!rbd_obj_is_entire(obj_req) && obj_req->num_img_extents) {
1893 ret = __rbd_obj_setup_stat(obj_req, which++);
1898 __rbd_obj_setup_discard(obj_req, which);
1903 * For each object request in @img_req, allocate an OSD request, add
1904 * individual OSD ops and prepare them for submission. The number of
1905 * OSD ops depends on op_type and the overlap point (if any).
1907 static int __rbd_img_fill_request(struct rbd_img_request *img_req)
1909 struct rbd_obj_request *obj_req;
1912 for_each_obj_request(img_req, obj_req) {
1913 switch (img_req->op_type) {
1915 ret = rbd_obj_setup_read(obj_req);
1918 ret = rbd_obj_setup_write(obj_req);
1920 case OBJ_OP_DISCARD:
1921 ret = rbd_obj_setup_discard(obj_req);
1933 union rbd_img_fill_iter {
1934 struct ceph_bio_iter bio_iter;
1935 struct ceph_bvec_iter bvec_iter;
1938 struct rbd_img_fill_ctx {
1939 enum obj_request_type pos_type;
1940 union rbd_img_fill_iter *pos;
1941 union rbd_img_fill_iter iter;
1942 ceph_object_extent_fn_t set_pos_fn;
1943 ceph_object_extent_fn_t count_fn;
1944 ceph_object_extent_fn_t copy_fn;
1947 static struct ceph_object_extent *alloc_object_extent(void *arg)
1949 struct rbd_img_request *img_req = arg;
1950 struct rbd_obj_request *obj_req;
1952 obj_req = rbd_obj_request_create();
1956 rbd_img_obj_request_add(img_req, obj_req);
1957 return &obj_req->ex;
1961 * While su != os && sc == 1 is technically not fancy (it's the same
1962 * layout as su == os && sc == 1), we can't use the nocopy path for it
1963 * because ->set_pos_fn() should be called only once per object.
1964 * ceph_file_to_extents() invokes action_fn once per stripe unit, so
1965 * treat su != os && sc == 1 as fancy.
1967 static bool rbd_layout_is_fancy(struct ceph_file_layout *l)
1969 return l->stripe_unit != l->object_size;
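/*
 * In the non-fancy case (stripe_unit == object_size, stripe_count == 1)
 * an image offset maps to exactly one object: objno = off / object_size
 * and the offset within it is off % object_size.  Minimal standalone
 * sketch, assuming 4 MiB objects (obj_order 22).
 */
#if 0 /* standalone userspace sketch -- never built with the driver */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t object_size = 4ULL << 20;	/* 4 MiB, i.e. obj_order 22 */
	uint64_t off = (9ULL << 20) + 123;	/* some image offset */

	uint64_t objno = off / object_size;
	uint64_t objoff = off % object_size;

	/* object 2, 1 MiB + 123 bytes into it */
	printf("offset %llu -> objno %llu, offset-in-object %llu\n",
	       (unsigned long long)off, (unsigned long long)objno,
	       (unsigned long long)objoff);
	return 0;
}
#endif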
1972 static int rbd_img_fill_request_nocopy(struct rbd_img_request *img_req,
1973 struct ceph_file_extent *img_extents,
1974 u32 num_img_extents,
1975 struct rbd_img_fill_ctx *fctx)
1980 img_req->data_type = fctx->pos_type;
1983 * Create object requests and set each object request's starting
1984 * position in the provided bio (list) or bio_vec array.
1986 fctx->iter = *fctx->pos;
1987 for (i = 0; i < num_img_extents; i++) {
1988 ret = ceph_file_to_extents(&img_req->rbd_dev->layout,
1989 img_extents[i].fe_off,
1990 img_extents[i].fe_len,
1991 &img_req->object_extents,
1992 alloc_object_extent, img_req,
1993 fctx->set_pos_fn, &fctx->iter);
1998 return __rbd_img_fill_request(img_req);
2002 * Map a list of image extents to a list of object extents, create the
2003 * corresponding object requests (normally each to a different object,
2004 * but not always) and add them to @img_req. For each object request,
2005 * set up its data descriptor to point to the corresponding chunk(s) of
2006 * @fctx->pos data buffer.
2008 * Because ceph_file_to_extents() will merge adjacent object extents
2009 * together, each object request's data descriptor may point to multiple
2010 * different chunks of @fctx->pos data buffer.
2012 * @fctx->pos data buffer is assumed to be large enough.
2014 static int rbd_img_fill_request(struct rbd_img_request *img_req,
2015 struct ceph_file_extent *img_extents,
2016 u32 num_img_extents,
2017 struct rbd_img_fill_ctx *fctx)
2019 struct rbd_device *rbd_dev = img_req->rbd_dev;
2020 struct rbd_obj_request *obj_req;
2024 if (fctx->pos_type == OBJ_REQUEST_NODATA ||
2025 !rbd_layout_is_fancy(&rbd_dev->layout))
2026 return rbd_img_fill_request_nocopy(img_req, img_extents,
2027 num_img_extents, fctx);
2029 img_req->data_type = OBJ_REQUEST_OWN_BVECS;
2032 * Create object requests and determine ->bvec_count for each object
2033 * request. Note that ->bvec_count sum over all object requests may
2034 * be greater than the number of bio_vecs in the provided bio (list)
2035 * or bio_vec array because when mapped, those bio_vecs can straddle
2036 * stripe unit boundaries.
2038 fctx->iter = *fctx->pos;
2039 for (i = 0; i < num_img_extents; i++) {
2040 ret = ceph_file_to_extents(&rbd_dev->layout,
2041 img_extents[i].fe_off,
2042 img_extents[i].fe_len,
2043 &img_req->object_extents,
2044 alloc_object_extent, img_req,
2045 fctx->count_fn, &fctx->iter);
2050 for_each_obj_request(img_req, obj_req) {
2051 obj_req->bvec_pos.bvecs = kmalloc_array(obj_req->bvec_count,
2052 sizeof(*obj_req->bvec_pos.bvecs),
2054 if (!obj_req->bvec_pos.bvecs)
2059 * Fill in each object request's private bio_vec array, splitting and
2060 * rearranging the provided bio_vecs in stripe unit chunks as needed.
2062 fctx->iter = *fctx->pos;
2063 for (i = 0; i < num_img_extents; i++) {
2064 ret = ceph_iterate_extents(&rbd_dev->layout,
2065 img_extents[i].fe_off,
2066 img_extents[i].fe_len,
2067 &img_req->object_extents,
2068 fctx->copy_fn, &fctx->iter);
2073 return __rbd_img_fill_request(img_req);
2076 static int rbd_img_fill_nodata(struct rbd_img_request *img_req,
2079 struct ceph_file_extent ex = { off, len };
2080 union rbd_img_fill_iter dummy;
2081 struct rbd_img_fill_ctx fctx = {
2082 .pos_type = OBJ_REQUEST_NODATA,
2086 return rbd_img_fill_request(img_req, &ex, 1, &fctx);
2089 static void set_bio_pos(struct ceph_object_extent *ex, u32 bytes, void *arg)
2091 struct rbd_obj_request *obj_req =
2092 container_of(ex, struct rbd_obj_request, ex);
2093 struct ceph_bio_iter *it = arg;
2095 dout("%s objno %llu bytes %u\n", __func__, ex->oe_objno, bytes);
2096 obj_req->bio_pos = *it;
2097 ceph_bio_iter_advance(it, bytes);
2100 static void count_bio_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
2102 struct rbd_obj_request *obj_req =
2103 container_of(ex, struct rbd_obj_request, ex);
2104 struct ceph_bio_iter *it = arg;
2106 dout("%s objno %llu bytes %u\n", __func__, ex->oe_objno, bytes);
2107 ceph_bio_iter_advance_step(it, bytes, ({
2108 obj_req->bvec_count++;
2113 static void copy_bio_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
2115 struct rbd_obj_request *obj_req =
2116 container_of(ex, struct rbd_obj_request, ex);
2117 struct ceph_bio_iter *it = arg;
2119 dout("%s objno %llu bytes %u\n", __func__, ex->oe_objno, bytes);
2120 ceph_bio_iter_advance_step(it, bytes, ({
2121 obj_req->bvec_pos.bvecs[obj_req->bvec_idx++] = bv;
2122 obj_req->bvec_pos.iter.bi_size += bv.bv_len;
2126 static int __rbd_img_fill_from_bio(struct rbd_img_request *img_req,
2127 struct ceph_file_extent *img_extents,
2128 u32 num_img_extents,
2129 struct ceph_bio_iter *bio_pos)
2131 struct rbd_img_fill_ctx fctx = {
2132 .pos_type = OBJ_REQUEST_BIO,
2133 .pos = (union rbd_img_fill_iter *)bio_pos,
2134 .set_pos_fn = set_bio_pos,
2135 .count_fn = count_bio_bvecs,
2136 .copy_fn = copy_bio_bvecs,
2139 return rbd_img_fill_request(img_req, img_extents, num_img_extents,
2143 static int rbd_img_fill_from_bio(struct rbd_img_request *img_req,
2144 u64 off, u64 len, struct bio *bio)
2146 struct ceph_file_extent ex = { off, len };
2147 struct ceph_bio_iter it = { .bio = bio, .iter = bio->bi_iter };
2149 return __rbd_img_fill_from_bio(img_req, &ex, 1, &it);
2152 static void set_bvec_pos(struct ceph_object_extent *ex, u32 bytes, void *arg)
2154 struct rbd_obj_request *obj_req =
2155 container_of(ex, struct rbd_obj_request, ex);
2156 struct ceph_bvec_iter *it = arg;
2158 obj_req->bvec_pos = *it;
2159 ceph_bvec_iter_shorten(&obj_req->bvec_pos, bytes);
2160 ceph_bvec_iter_advance(it, bytes);
2163 static void count_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
2165 struct rbd_obj_request *obj_req =
2166 container_of(ex, struct rbd_obj_request, ex);
2167 struct ceph_bvec_iter *it = arg;
2169 ceph_bvec_iter_advance_step(it, bytes, ({
2170 obj_req->bvec_count++;
2174 static void copy_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
2176 struct rbd_obj_request *obj_req =
2177 container_of(ex, struct rbd_obj_request, ex);
2178 struct ceph_bvec_iter *it = arg;
2180 ceph_bvec_iter_advance_step(it, bytes, ({
2181 obj_req->bvec_pos.bvecs[obj_req->bvec_idx++] = bv;
2182 obj_req->bvec_pos.iter.bi_size += bv.bv_len;
2186 static int __rbd_img_fill_from_bvecs(struct rbd_img_request *img_req,
2187 struct ceph_file_extent *img_extents,
2188 u32 num_img_extents,
2189 struct ceph_bvec_iter *bvec_pos)
2191 struct rbd_img_fill_ctx fctx = {
2192 .pos_type = OBJ_REQUEST_BVECS,
2193 .pos = (union rbd_img_fill_iter *)bvec_pos,
2194 .set_pos_fn = set_bvec_pos,
2195 .count_fn = count_bvecs,
2196 .copy_fn = copy_bvecs,
2199 return rbd_img_fill_request(img_req, img_extents, num_img_extents,
2203 static int rbd_img_fill_from_bvecs(struct rbd_img_request *img_req,
2204 struct ceph_file_extent *img_extents,
2205 u32 num_img_extents,
2206 struct bio_vec *bvecs)
2208 struct ceph_bvec_iter it = {
2210 .iter = { .bi_size = ceph_file_extents_bytes(img_extents,
2214 return __rbd_img_fill_from_bvecs(img_req, img_extents, num_img_extents,
2218 static void rbd_img_request_submit(struct rbd_img_request *img_request)
2220 struct rbd_obj_request *obj_request;
2222 dout("%s: img %p\n", __func__, img_request);
2224 rbd_img_request_get(img_request);
2225 for_each_obj_request(img_request, obj_request)
2226 rbd_obj_request_submit(obj_request);
2228 rbd_img_request_put(img_request);
2231 static int rbd_obj_read_from_parent(struct rbd_obj_request *obj_req)
2233 struct rbd_img_request *img_req = obj_req->img_request;
2234 struct rbd_img_request *child_img_req;
2237 child_img_req = rbd_img_request_create(img_req->rbd_dev->parent,
2242 __set_bit(IMG_REQ_CHILD, &child_img_req->flags);
2243 child_img_req->obj_request = obj_req;
2245 if (!rbd_img_is_write(img_req)) {
2246 switch (img_req->data_type) {
2247 case OBJ_REQUEST_BIO:
2248 ret = __rbd_img_fill_from_bio(child_img_req,
2249 obj_req->img_extents,
2250 obj_req->num_img_extents,
2253 case OBJ_REQUEST_BVECS:
2254 case OBJ_REQUEST_OWN_BVECS:
2255 ret = __rbd_img_fill_from_bvecs(child_img_req,
2256 obj_req->img_extents,
2257 obj_req->num_img_extents,
2258 &obj_req->bvec_pos);
2264 ret = rbd_img_fill_from_bvecs(child_img_req,
2265 obj_req->img_extents,
2266 obj_req->num_img_extents,
2267 obj_req->copyup_bvecs);
2270 rbd_img_request_put(child_img_req);
2274 rbd_img_request_submit(child_img_req);
2278 static bool rbd_obj_handle_read(struct rbd_obj_request *obj_req)
2280 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2283 if (obj_req->result == -ENOENT &&
2284 rbd_dev->parent_overlap && !obj_req->tried_parent) {
2285 /* reverse map this object extent onto the parent */
2286 ret = rbd_obj_calc_img_extents(obj_req, false);
2288 obj_req->result = ret;
2292 if (obj_req->num_img_extents) {
2293 obj_req->tried_parent = true;
2294 ret = rbd_obj_read_from_parent(obj_req);
2296 obj_req->result = ret;
2304 * -ENOENT means a hole in the image -- zero-fill the entire
2305 * length of the request. A short read also implies zero-fill
2306 * to the end of the request. In both cases we update xferred
2307 * count to indicate the whole request was satisfied.
2309 if (obj_req->result == -ENOENT ||
2310 (!obj_req->result && obj_req->xferred < obj_req->ex.oe_len)) {
2311 rbd_assert(!obj_req->xferred || !obj_req->result);
2312 rbd_obj_zero_range(obj_req, obj_req->xferred,
2313 obj_req->ex.oe_len - obj_req->xferred);
2314 obj_req->result = 0;
2315 obj_req->xferred = obj_req->ex.oe_len;
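/*
 * Hedged standalone illustration of the zero-fill rule above: whatever
 * was not transferred (a hole returning -ENOENT, or a short read) is
 * zeroed to the end of the request and the request is reported as
 * fully satisfied.  The buffer and values below are made up.
 */
#if 0 /* standalone userspace sketch -- never built with the driver */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[16];
	size_t req_len = sizeof(buf);
	size_t xferred = 6;		/* short read: only 6 bytes arrived */

	memset(buf, 'X', sizeof(buf));	/* pretend stale data */
	memcpy(buf, "ceph!!", xferred);	/* the bytes that actually arrived */

	/* zero-fill from xferred to the end of the request */
	memset(buf + xferred, 0, req_len - xferred);
	xferred = req_len;		/* report the whole request as satisfied */

	printf("xferred=%zu, tail byte=%d\n", xferred, buf[req_len - 1]);
	return 0;
}
#endif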
2322 * copyup_bvecs pages are never highmem pages
2324 static bool is_zero_bvecs(struct bio_vec *bvecs, u32 bytes)
2326 struct ceph_bvec_iter it = {
2328 .iter = { .bi_size = bytes },
2331 ceph_bvec_iter_advance_step(&it, bytes, ({
2332 if (memchr_inv(page_address(bv.bv_page) + bv.bv_offset, 0,
2339 static int rbd_obj_issue_copyup(struct rbd_obj_request *obj_req, u32 bytes)
2341 unsigned int num_osd_ops = obj_req->osd_req->r_num_ops;
2343 dout("%s obj_req %p bytes %u\n", __func__, obj_req, bytes);
2344 rbd_assert(obj_req->osd_req->r_ops[0].op == CEPH_OSD_OP_STAT);
2345 rbd_osd_req_destroy(obj_req->osd_req);
2348 * Create a copyup request with the same number of OSD ops as
2349 * the original request. The original request was stat + op(s),
2350 * the new copyup request will be copyup + the same op(s).
2352 obj_req->osd_req = rbd_osd_req_create(obj_req, num_osd_ops);
2353 if (!obj_req->osd_req)
/*
 * Only send non-zero copyup data to save some I/O and network
 * bandwidth -- zero copyup data is equivalent to the object not
 * existing.
 */
2361 if (is_zero_bvecs(obj_req->copyup_bvecs, bytes)) {
2362 dout("%s obj_req %p detected zeroes\n", __func__, obj_req);
2366 osd_req_op_cls_init(obj_req->osd_req, 0, CEPH_OSD_OP_CALL, "rbd",
2368 osd_req_op_cls_request_data_bvecs(obj_req->osd_req, 0,
2369 obj_req->copyup_bvecs,
2370 obj_req->copyup_bvec_count,
2373 switch (obj_req->img_request->op_type) {
2375 __rbd_obj_setup_write(obj_req, 1);
2377 case OBJ_OP_DISCARD:
2378 rbd_assert(!rbd_obj_is_entire(obj_req));
2379 __rbd_obj_setup_discard(obj_req, 1);
2385 rbd_obj_request_submit(obj_req);
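/*
 * In other words, the request submitted above is
 *
 *	[ call("rbd", "copyup") + parent data, original write/discard op(s) ]
 *
 * applied atomically to the object by the OSD: the copyup materializes
 * the object from the parent's data (an empty payload when that data is
 * all zeroes), and the original guarded op(s) are then applied on top.
 */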
2389 static int setup_copyup_bvecs(struct rbd_obj_request *obj_req, u64 obj_overlap)
2393 rbd_assert(!obj_req->copyup_bvecs);
2394 obj_req->copyup_bvec_count = calc_pages_for(0, obj_overlap);
2395 obj_req->copyup_bvecs = kcalloc(obj_req->copyup_bvec_count,
2396 sizeof(*obj_req->copyup_bvecs),
2398 if (!obj_req->copyup_bvecs)
2401 for (i = 0; i < obj_req->copyup_bvec_count; i++) {
2402 unsigned int len = min(obj_overlap, (u64)PAGE_SIZE);
2404 obj_req->copyup_bvecs[i].bv_page = alloc_page(GFP_NOIO);
2405 if (!obj_req->copyup_bvecs[i].bv_page)
2408 obj_req->copyup_bvecs[i].bv_offset = 0;
2409 obj_req->copyup_bvecs[i].bv_len = len;
2413 rbd_assert(!obj_overlap);
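/*
 * Allocation note: one bvec per page of parent overlap is allocated
 * (calc_pages_for(0, obj_overlap)), each backed by its own GFP_NOIO
 * page; obj_overlap is whittled down by bv_len on every iteration,
 * which is what the final assertion checks.
 */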
2417 static int rbd_obj_handle_write_guard(struct rbd_obj_request *obj_req)
2419 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2422 rbd_assert(obj_req->num_img_extents);
2423 prune_extents(obj_req->img_extents, &obj_req->num_img_extents,
2424 rbd_dev->parent_overlap);
2425 if (!obj_req->num_img_extents) {
2427 * The overlap has become 0 (most likely because the
2428 * image has been flattened). Use rbd_obj_issue_copyup()
2429 * to re-submit the original write request -- the copyup
2430 * operation itself will be a no-op, since someone must
2431 * have populated the child object while we weren't
2432 * looking. Move to WRITE_FLAT state as we'll be done
2433 * with the operation once the null copyup completes.
2435 obj_req->write_state = RBD_OBJ_WRITE_FLAT;
2436 return rbd_obj_issue_copyup(obj_req, 0);
2439 ret = setup_copyup_bvecs(obj_req, rbd_obj_img_extents_bytes(obj_req));
2443 obj_req->write_state = RBD_OBJ_WRITE_COPYUP;
2444 return rbd_obj_read_from_parent(obj_req);
2447 static bool rbd_obj_handle_write(struct rbd_obj_request *obj_req)
2452 switch (obj_req->write_state) {
2453 case RBD_OBJ_WRITE_GUARD:
2454 rbd_assert(!obj_req->xferred);
2455 if (obj_req->result == -ENOENT) {
2457 * The target object doesn't exist. Read the data for
2458 * the entire target object up to the overlap point (if
2459 * any) from the parent, so we can use it for a copyup.
2461 ret = rbd_obj_handle_write_guard(obj_req);
2463 obj_req->result = ret;
2469 case RBD_OBJ_WRITE_FLAT:
2470 if (!obj_req->result)
2472 * There is no such thing as a successful short
2473 * write -- indicate the whole request was satisfied.
2475 obj_req->xferred = obj_req->ex.oe_len;
2477 case RBD_OBJ_WRITE_COPYUP:
2478 obj_req->write_state = RBD_OBJ_WRITE_GUARD;
2479 if (obj_req->result)
2482 rbd_assert(obj_req->xferred);
2483 ret = rbd_obj_issue_copyup(obj_req, obj_req->xferred);
2485 obj_req->result = ret;
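/*
 * Roughly, the write states handled above chain as follows:
 *
 *   WRITE_GUARD:  the guarded op came back -ENOENT -- read the
 *                 overlapping parent data (rbd_obj_handle_write_guard(),
 *                 which moves to WRITE_COPYUP, or to WRITE_FLAT with a
 *                 null copyup if the overlap vanished); any other result
 *                 is handled like WRITE_FLAT.
 *   WRITE_COPYUP: the parent data has arrived -- resend as copyup +
 *                 original op(s) and go back to WRITE_GUARD for its
 *                 completion.
 *   WRITE_FLAT:   terminal -- a successful write is always accounted
 *                 as a full-length write.
 */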
2495 * Returns true if @obj_req is completed, or false otherwise.
2497 static bool __rbd_obj_handle_request(struct rbd_obj_request *obj_req)
2499 switch (obj_req->img_request->op_type) {
2501 return rbd_obj_handle_read(obj_req);
2503 return rbd_obj_handle_write(obj_req);
2504 case OBJ_OP_DISCARD:
2505 if (rbd_obj_handle_write(obj_req)) {
2507 * Hide -ENOENT from delete/truncate/zero -- discarding
2508 * a non-existent object is not a problem.
2510 if (obj_req->result == -ENOENT) {
2511 obj_req->result = 0;
2512 obj_req->xferred = obj_req->ex.oe_len;
2522 static void rbd_obj_end_request(struct rbd_obj_request *obj_req)
2524 struct rbd_img_request *img_req = obj_req->img_request;
2526 rbd_assert((!obj_req->result &&
2527 obj_req->xferred == obj_req->ex.oe_len) ||
2528 (obj_req->result < 0 && !obj_req->xferred));
2529 if (!obj_req->result) {
2530 img_req->xferred += obj_req->xferred;
2534 rbd_warn(img_req->rbd_dev,
2535 "%s at objno %llu %llu~%llu result %d xferred %llu",
2536 obj_op_name(img_req->op_type), obj_req->ex.oe_objno,
2537 obj_req->ex.oe_off, obj_req->ex.oe_len, obj_req->result,
2539 if (!img_req->result) {
2540 img_req->result = obj_req->result;
2541 img_req->xferred = 0;
2545 static void rbd_img_end_child_request(struct rbd_img_request *img_req)
2547 struct rbd_obj_request *obj_req = img_req->obj_request;
2549 rbd_assert(test_bit(IMG_REQ_CHILD, &img_req->flags));
2550 rbd_assert((!img_req->result &&
2551 img_req->xferred == rbd_obj_img_extents_bytes(obj_req)) ||
2552 (img_req->result < 0 && !img_req->xferred));
2554 obj_req->result = img_req->result;
2555 obj_req->xferred = img_req->xferred;
2556 rbd_img_request_put(img_req);
2559 static void rbd_img_end_request(struct rbd_img_request *img_req)
2561 rbd_assert(!test_bit(IMG_REQ_CHILD, &img_req->flags));
2562 rbd_assert((!img_req->result &&
2563 img_req->xferred == blk_rq_bytes(img_req->rq)) ||
2564 (img_req->result < 0 && !img_req->xferred));
2566 blk_mq_end_request(img_req->rq,
2567 errno_to_blk_status(img_req->result));
2568 rbd_img_request_put(img_req);
2571 static void rbd_obj_handle_request(struct rbd_obj_request *obj_req)
2573 struct rbd_img_request *img_req;
2576 if (!__rbd_obj_handle_request(obj_req))
2579 img_req = obj_req->img_request;
2580 spin_lock(&img_req->completion_lock);
2581 rbd_obj_end_request(obj_req);
2582 rbd_assert(img_req->pending_count);
2583 if (--img_req->pending_count) {
2584 spin_unlock(&img_req->completion_lock);
2588 spin_unlock(&img_req->completion_lock);
2589 if (test_bit(IMG_REQ_CHILD, &img_req->flags)) {
2590 obj_req = img_req->obj_request;
2591 rbd_img_end_child_request(img_req);
2594 rbd_img_end_request(img_req);
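/*
 * Completion accounting: each finished object request decrements
 * img_req->pending_count under completion_lock, and the image request
 * completes only when the count reaches zero.  A child (parent read)
 * image request then copies its result/xferred back into the object
 * request that spawned it, while a top-level image request ends the
 * blk-mq request it was created for.
 */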
2597 static const struct rbd_client_id rbd_empty_cid;
2599 static bool rbd_cid_equal(const struct rbd_client_id *lhs,
2600 const struct rbd_client_id *rhs)
2602 return lhs->gid == rhs->gid && lhs->handle == rhs->handle;
2605 static struct rbd_client_id rbd_get_cid(struct rbd_device *rbd_dev)
2607 struct rbd_client_id cid;
2609 mutex_lock(&rbd_dev->watch_mutex);
2610 cid.gid = ceph_client_gid(rbd_dev->rbd_client->client);
2611 cid.handle = rbd_dev->watch_cookie;
2612 mutex_unlock(&rbd_dev->watch_mutex);
2617 * lock_rwsem must be held for write
2619 static void rbd_set_owner_cid(struct rbd_device *rbd_dev,
2620 const struct rbd_client_id *cid)
2622 dout("%s rbd_dev %p %llu-%llu -> %llu-%llu\n", __func__, rbd_dev,
2623 rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle,
2624 cid->gid, cid->handle);
2625 rbd_dev->owner_cid = *cid; /* struct */
2628 static void format_lock_cookie(struct rbd_device *rbd_dev, char *buf)
2630 mutex_lock(&rbd_dev->watch_mutex);
2631 sprintf(buf, "%s %llu", RBD_LOCK_COOKIE_PREFIX, rbd_dev->watch_cookie);
2632 mutex_unlock(&rbd_dev->watch_mutex);
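/*
 * The resulting cookie is "<RBD_LOCK_COOKIE_PREFIX> <watch_cookie>",
 * e.g. "auto 94769860123" (illustrative value; the prefix is "auto" at
 * the time of writing).  Deriving it from the watch cookie is what
 * lets find_watcher() below map a locker back to a live watch.
 */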
2635 static void __rbd_lock(struct rbd_device *rbd_dev, const char *cookie)
2637 struct rbd_client_id cid = rbd_get_cid(rbd_dev);
2639 strcpy(rbd_dev->lock_cookie, cookie);
2640 rbd_set_owner_cid(rbd_dev, &cid);
2641 queue_work(rbd_dev->task_wq, &rbd_dev->acquired_lock_work);
2645 * lock_rwsem must be held for write
2647 static int rbd_lock(struct rbd_device *rbd_dev)
2649 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2653 WARN_ON(__rbd_is_lock_owner(rbd_dev) ||
2654 rbd_dev->lock_cookie[0] != '\0');
2656 format_lock_cookie(rbd_dev, cookie);
2657 ret = ceph_cls_lock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
2658 RBD_LOCK_NAME, CEPH_CLS_LOCK_EXCLUSIVE, cookie,
2659 RBD_LOCK_TAG, "", 0);
2663 rbd_dev->lock_state = RBD_LOCK_STATE_LOCKED;
2664 __rbd_lock(rbd_dev, cookie);
2669 * lock_rwsem must be held for write
2671 static void rbd_unlock(struct rbd_device *rbd_dev)
2673 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2676 WARN_ON(!__rbd_is_lock_owner(rbd_dev) ||
2677 rbd_dev->lock_cookie[0] == '\0');
2679 ret = ceph_cls_unlock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
2680 RBD_LOCK_NAME, rbd_dev->lock_cookie);
2681 if (ret && ret != -ENOENT)
2682 rbd_warn(rbd_dev, "failed to unlock: %d", ret);
2684 /* treat errors as if the image were unlocked */
2685 rbd_dev->lock_state = RBD_LOCK_STATE_UNLOCKED;
2686 rbd_dev->lock_cookie[0] = '\0';
2687 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
2688 queue_work(rbd_dev->task_wq, &rbd_dev->released_lock_work);
2691 static int __rbd_notify_op_lock(struct rbd_device *rbd_dev,
2692 enum rbd_notify_op notify_op,
2693 struct page ***preply_pages,
2696 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2697 struct rbd_client_id cid = rbd_get_cid(rbd_dev);
2698 char buf[4 + 8 + 8 + CEPH_ENCODING_START_BLK_LEN];
2699 int buf_size = sizeof(buf);
2702 dout("%s rbd_dev %p notify_op %d\n", __func__, rbd_dev, notify_op);
2704 /* encode *LockPayload NotifyMessage (op + ClientId) */
2705 ceph_start_encoding(&p, 2, 1, buf_size - CEPH_ENCODING_START_BLK_LEN);
2706 ceph_encode_32(&p, notify_op);
2707 ceph_encode_64(&p, cid.gid);
2708 ceph_encode_64(&p, cid.handle);
2710 return ceph_osdc_notify(osdc, &rbd_dev->header_oid,
2711 &rbd_dev->header_oloc, buf, buf_size,
2712 RBD_NOTIFY_TIMEOUT, preply_pages, preply_len);
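/*
 * The notification payload built above is, on the wire:
 *
 *	ceph start-of-encoding block (struct_v 2, compat 1, length)
 *	__le32 notify_op
 *	__le64 gid       \  ClientId of the sender
 *	__le64 handle    /
 *
 * which is why buf is sized 4 + 8 + 8 + CEPH_ENCODING_START_BLK_LEN.
 */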
2715 static void rbd_notify_op_lock(struct rbd_device *rbd_dev,
2716 enum rbd_notify_op notify_op)
2718 struct page **reply_pages;
2721 __rbd_notify_op_lock(rbd_dev, notify_op, &reply_pages, &reply_len);
2722 ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len));
2725 static void rbd_notify_acquired_lock(struct work_struct *work)
2727 struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
2728 acquired_lock_work);
2730 rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_ACQUIRED_LOCK);
2733 static void rbd_notify_released_lock(struct work_struct *work)
2735 struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
2736 released_lock_work);
2738 rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_RELEASED_LOCK);
2741 static int rbd_request_lock(struct rbd_device *rbd_dev)
2743 struct page **reply_pages;
2745 bool lock_owner_responded = false;
2748 dout("%s rbd_dev %p\n", __func__, rbd_dev);
2750 ret = __rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_REQUEST_LOCK,
2751 &reply_pages, &reply_len);
2752 if (ret && ret != -ETIMEDOUT) {
2753 rbd_warn(rbd_dev, "failed to request lock: %d", ret);
2757 if (reply_len > 0 && reply_len <= PAGE_SIZE) {
2758 void *p = page_address(reply_pages[0]);
2759 void *const end = p + reply_len;
2762 ceph_decode_32_safe(&p, end, n, e_inval); /* num_acks */
2767 ceph_decode_need(&p, end, 8 + 8, e_inval);
2768 p += 8 + 8; /* skip gid and cookie */
2770 ceph_decode_32_safe(&p, end, len, e_inval);
2774 if (lock_owner_responded) {
2776 "duplicate lock owners detected");
2781 lock_owner_responded = true;
2782 ret = ceph_start_decoding(&p, end, 1, "ResponseMessage",
2786 "failed to decode ResponseMessage: %d",
2791 ret = ceph_decode_32(&p);
2795 if (!lock_owner_responded) {
2796 rbd_warn(rbd_dev, "no lock owners detected");
2801 ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len));
2809 static void wake_requests(struct rbd_device *rbd_dev, bool wake_all)
2811 dout("%s rbd_dev %p wake_all %d\n", __func__, rbd_dev, wake_all);
2813 cancel_delayed_work(&rbd_dev->lock_dwork);
2815 wake_up_all(&rbd_dev->lock_waitq);
2817 wake_up(&rbd_dev->lock_waitq);
2820 static int get_lock_owner_info(struct rbd_device *rbd_dev,
2821 struct ceph_locker **lockers, u32 *num_lockers)
2823 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2828 dout("%s rbd_dev %p\n", __func__, rbd_dev);
2830 ret = ceph_cls_lock_info(osdc, &rbd_dev->header_oid,
2831 &rbd_dev->header_oloc, RBD_LOCK_NAME,
2832 &lock_type, &lock_tag, lockers, num_lockers);
2836 if (*num_lockers == 0) {
2837 dout("%s rbd_dev %p no lockers detected\n", __func__, rbd_dev);
2841 if (strcmp(lock_tag, RBD_LOCK_TAG)) {
2842 rbd_warn(rbd_dev, "locked by external mechanism, tag %s",
2848 if (lock_type == CEPH_CLS_LOCK_SHARED) {
2849 rbd_warn(rbd_dev, "shared lock type detected");
2854 if (strncmp((*lockers)[0].id.cookie, RBD_LOCK_COOKIE_PREFIX,
2855 strlen(RBD_LOCK_COOKIE_PREFIX))) {
2856 rbd_warn(rbd_dev, "locked by external mechanism, cookie %s",
2857 (*lockers)[0].id.cookie);
2867 static int find_watcher(struct rbd_device *rbd_dev,
2868 const struct ceph_locker *locker)
2870 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2871 struct ceph_watch_item *watchers;
2877 ret = ceph_osdc_list_watchers(osdc, &rbd_dev->header_oid,
2878 &rbd_dev->header_oloc, &watchers,
2883 sscanf(locker->id.cookie, RBD_LOCK_COOKIE_PREFIX " %llu", &cookie);
2884 for (i = 0; i < num_watchers; i++) {
2885 if (!memcmp(&watchers[i].addr, &locker->info.addr,
2886 sizeof(locker->info.addr)) &&
2887 watchers[i].cookie == cookie) {
2888 struct rbd_client_id cid = {
2889 .gid = le64_to_cpu(watchers[i].name.num),
2893 dout("%s rbd_dev %p found cid %llu-%llu\n", __func__,
2894 rbd_dev, cid.gid, cid.handle);
2895 rbd_set_owner_cid(rbd_dev, &cid);
2901 dout("%s rbd_dev %p no watchers\n", __func__, rbd_dev);
2909 * lock_rwsem must be held for write
2911 static int rbd_try_lock(struct rbd_device *rbd_dev)
2913 struct ceph_client *client = rbd_dev->rbd_client->client;
2914 struct ceph_locker *lockers;
2919 ret = rbd_lock(rbd_dev);
2923 /* determine if the current lock holder is still alive */
2924 ret = get_lock_owner_info(rbd_dev, &lockers, &num_lockers);
2928 if (num_lockers == 0)
2931 ret = find_watcher(rbd_dev, lockers);
2934 ret = 0; /* have to request lock */
2938 rbd_warn(rbd_dev, "%s%llu seems dead, breaking lock",
2939 ENTITY_NAME(lockers[0].id.name));
2941 ret = ceph_monc_blacklist_add(&client->monc,
2942 &lockers[0].info.addr);
2944 rbd_warn(rbd_dev, "blacklist of %s%llu failed: %d",
2945 ENTITY_NAME(lockers[0].id.name), ret);
2949 ret = ceph_cls_break_lock(&client->osdc, &rbd_dev->header_oid,
2950 &rbd_dev->header_oloc, RBD_LOCK_NAME,
2951 lockers[0].id.cookie,
2952 &lockers[0].id.name);
2953 if (ret && ret != -ENOENT)
2957 ceph_free_lockers(lockers, num_lockers);
2961 ceph_free_lockers(lockers, num_lockers);
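/*
 * To recap rbd_try_lock(): try to take the exclusive lock; if it is
 * already held, look the holder up among the watchers on the header
 * object.  A holder with a live watch means we must ask for the lock
 * politely (return 0), while a holder without one is presumed dead:
 * it is blacklisted, its lock is broken, and the acquisition loop is
 * retried.
 */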
2966 * ret is set only if lock_state is RBD_LOCK_STATE_UNLOCKED
2968 static enum rbd_lock_state rbd_try_acquire_lock(struct rbd_device *rbd_dev,
2971 enum rbd_lock_state lock_state;
2973 down_read(&rbd_dev->lock_rwsem);
2974 dout("%s rbd_dev %p read lock_state %d\n", __func__, rbd_dev,
2975 rbd_dev->lock_state);
2976 if (__rbd_is_lock_owner(rbd_dev)) {
2977 lock_state = rbd_dev->lock_state;
2978 up_read(&rbd_dev->lock_rwsem);
2982 up_read(&rbd_dev->lock_rwsem);
2983 down_write(&rbd_dev->lock_rwsem);
2984 dout("%s rbd_dev %p write lock_state %d\n", __func__, rbd_dev,
2985 rbd_dev->lock_state);
2986 if (!__rbd_is_lock_owner(rbd_dev)) {
2987 *pret = rbd_try_lock(rbd_dev);
2989 rbd_warn(rbd_dev, "failed to acquire lock: %d", *pret);
2992 lock_state = rbd_dev->lock_state;
2993 up_write(&rbd_dev->lock_rwsem);
2997 static void rbd_acquire_lock(struct work_struct *work)
2999 struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
3000 struct rbd_device, lock_dwork);
3001 enum rbd_lock_state lock_state;
3004 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3006 lock_state = rbd_try_acquire_lock(rbd_dev, &ret);
3007 if (lock_state != RBD_LOCK_STATE_UNLOCKED || ret == -EBLACKLISTED) {
3008 if (lock_state == RBD_LOCK_STATE_LOCKED)
3009 wake_requests(rbd_dev, true);
3010 dout("%s rbd_dev %p lock_state %d ret %d - done\n", __func__,
3011 rbd_dev, lock_state, ret);
3015 ret = rbd_request_lock(rbd_dev);
3016 if (ret == -ETIMEDOUT) {
3017 goto again; /* treat this as a dead client */
3018 } else if (ret == -EROFS) {
3019 rbd_warn(rbd_dev, "peer will not release lock");
3021 * If this is rbd_add_acquire_lock(), we want to fail
3022 * immediately -- reuse BLACKLISTED flag. Otherwise we want to block.
3025 if (!(rbd_dev->disk->flags & GENHD_FL_UP)) {
3026 set_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags);
3027 /* wake "rbd map --exclusive" process */
3028 wake_requests(rbd_dev, false);
3030 } else if (ret < 0) {
3031 rbd_warn(rbd_dev, "error requesting lock: %d", ret);
3032 mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork,
3036 * lock owner acked, but resend if we don't see them release the lock immediately
3039 dout("%s rbd_dev %p requeueing lock_dwork\n", __func__,
3041 mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork,
3042 msecs_to_jiffies(2 * RBD_NOTIFY_TIMEOUT * MSEC_PER_SEC));
3047 * lock_rwsem must be held for write
3049 static bool rbd_release_lock(struct rbd_device *rbd_dev)
3051 dout("%s rbd_dev %p read lock_state %d\n", __func__, rbd_dev,
3052 rbd_dev->lock_state);
3053 if (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED)
3056 rbd_dev->lock_state = RBD_LOCK_STATE_RELEASING;
3057 downgrade_write(&rbd_dev->lock_rwsem);
3059 * Ensure that all in-flight IO is flushed.
3061 * FIXME: ceph_osdc_sync() flushes the entire OSD client, which
3062 * may be shared with other devices.
3064 ceph_osdc_sync(&rbd_dev->rbd_client->client->osdc);
3065 up_read(&rbd_dev->lock_rwsem);
3067 down_write(&rbd_dev->lock_rwsem);
3068 dout("%s rbd_dev %p write lock_state %d\n", __func__, rbd_dev,
3069 rbd_dev->lock_state);
3070 if (rbd_dev->lock_state != RBD_LOCK_STATE_RELEASING)
3073 rbd_unlock(rbd_dev);
3075 * Give others a chance to grab the lock - we would re-acquire
3076 * almost immediately if we got new IO during ceph_osdc_sync()
3077 * otherwise. We need to ack our own notifications, so this
3078 * lock_dwork will be requeued from rbd_wait_state_locked()
3079 * after wake_requests() in rbd_handle_released_lock().
3081 cancel_delayed_work(&rbd_dev->lock_dwork);
3085 static void rbd_release_lock_work(struct work_struct *work)
3087 struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
3090 down_write(&rbd_dev->lock_rwsem);
3091 rbd_release_lock(rbd_dev);
3092 up_write(&rbd_dev->lock_rwsem);
3095 static void rbd_handle_acquired_lock(struct rbd_device *rbd_dev, u8 struct_v,
3098 struct rbd_client_id cid = { 0 };
3100 if (struct_v >= 2) {
3101 cid.gid = ceph_decode_64(p);
3102 cid.handle = ceph_decode_64(p);
3105 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
3107 if (!rbd_cid_equal(&cid, &rbd_empty_cid)) {
3108 down_write(&rbd_dev->lock_rwsem);
3109 if (rbd_cid_equal(&cid, &rbd_dev->owner_cid)) {
3111 * we already know that the remote client is the owner
3114 up_write(&rbd_dev->lock_rwsem);
3118 rbd_set_owner_cid(rbd_dev, &cid);
3119 downgrade_write(&rbd_dev->lock_rwsem);
3121 down_read(&rbd_dev->lock_rwsem);
3124 if (!__rbd_is_lock_owner(rbd_dev))
3125 wake_requests(rbd_dev, false);
3126 up_read(&rbd_dev->lock_rwsem);
3129 static void rbd_handle_released_lock(struct rbd_device *rbd_dev, u8 struct_v,
3132 struct rbd_client_id cid = { 0 };
3134 if (struct_v >= 2) {
3135 cid.gid = ceph_decode_64(p);
3136 cid.handle = ceph_decode_64(p);
3139 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
3141 if (!rbd_cid_equal(&cid, &rbd_empty_cid)) {
3142 down_write(&rbd_dev->lock_rwsem);
3143 if (!rbd_cid_equal(&cid, &rbd_dev->owner_cid)) {
3144 dout("%s rbd_dev %p unexpected owner, cid %llu-%llu != owner_cid %llu-%llu\n",
3145 __func__, rbd_dev, cid.gid, cid.handle,
3146 rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle);
3147 up_write(&rbd_dev->lock_rwsem);
3151 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
3152 downgrade_write(&rbd_dev->lock_rwsem);
3154 down_read(&rbd_dev->lock_rwsem);
3157 if (!__rbd_is_lock_owner(rbd_dev))
3158 wake_requests(rbd_dev, false);
3159 up_read(&rbd_dev->lock_rwsem);
3163 * Returns result for ResponseMessage to be encoded (<= 0), or 1 if no
3164 * ResponseMessage is needed.
3166 static int rbd_handle_request_lock(struct rbd_device *rbd_dev, u8 struct_v,
3169 struct rbd_client_id my_cid = rbd_get_cid(rbd_dev);
3170 struct rbd_client_id cid = { 0 };
3173 if (struct_v >= 2) {
3174 cid.gid = ceph_decode_64(p);
3175 cid.handle = ceph_decode_64(p);
3178 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
3180 if (rbd_cid_equal(&cid, &my_cid))
3183 down_read(&rbd_dev->lock_rwsem);
3184 if (__rbd_is_lock_owner(rbd_dev)) {
3185 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED &&
3186 rbd_cid_equal(&rbd_dev->owner_cid, &rbd_empty_cid))
3190 * encode ResponseMessage(0) so the peer can detect a missing owner
3195 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED) {
3196 if (!rbd_dev->opts->exclusive) {
3197 dout("%s rbd_dev %p queueing unlock_work\n",
3199 queue_work(rbd_dev->task_wq,
3200 &rbd_dev->unlock_work);
3202 /* refuse to release the lock */
3209 up_read(&rbd_dev->lock_rwsem);
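/*
 * Taken together, the three handlers above implement the peer-to-peer
 * side of exclusive locking: ACQUIRED_LOCK/RELEASED_LOCK notifications
 * keep owner_cid up to date and wake any waiters, while REQUEST_LOCK
 * asks the current owner to let go.  The owner's answer is an encoded
 * ResponseMessage: 0 when it is releasing (or turns out not to own the
 * lock), -EROFS when it refuses because the mapping is exclusive, and
 * a return of 1 from the handler means no ResponseMessage is sent.
 */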
3213 static void __rbd_acknowledge_notify(struct rbd_device *rbd_dev,
3214 u64 notify_id, u64 cookie, s32 *result)
3216 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3217 char buf[4 + CEPH_ENCODING_START_BLK_LEN];
3218 int buf_size = sizeof(buf);
3224 /* encode ResponseMessage */
3225 ceph_start_encoding(&p, 1, 1,
3226 buf_size - CEPH_ENCODING_START_BLK_LEN);
3227 ceph_encode_32(&p, *result);
3232 ret = ceph_osdc_notify_ack(osdc, &rbd_dev->header_oid,
3233 &rbd_dev->header_oloc, notify_id, cookie,
3236 rbd_warn(rbd_dev, "acknowledge_notify failed: %d", ret);
3239 static void rbd_acknowledge_notify(struct rbd_device *rbd_dev, u64 notify_id,
3242 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3243 __rbd_acknowledge_notify(rbd_dev, notify_id, cookie, NULL);
3246 static void rbd_acknowledge_notify_result(struct rbd_device *rbd_dev,
3247 u64 notify_id, u64 cookie, s32 result)
3249 dout("%s rbd_dev %p result %d\n", __func__, rbd_dev, result);
3250 __rbd_acknowledge_notify(rbd_dev, notify_id, cookie, &result);
3253 static void rbd_watch_cb(void *arg, u64 notify_id, u64 cookie,
3254 u64 notifier_id, void *data, size_t data_len)
3256 struct rbd_device *rbd_dev = arg;
3258 void *const end = p + data_len;
3264 dout("%s rbd_dev %p cookie %llu notify_id %llu data_len %zu\n",
3265 __func__, rbd_dev, cookie, notify_id, data_len);
3267 ret = ceph_start_decoding(&p, end, 1, "NotifyMessage",
3270 rbd_warn(rbd_dev, "failed to decode NotifyMessage: %d",
3275 notify_op = ceph_decode_32(&p);
3277 /* legacy notification for header updates */
3278 notify_op = RBD_NOTIFY_OP_HEADER_UPDATE;
3282 dout("%s rbd_dev %p notify_op %u\n", __func__, rbd_dev, notify_op);
3283 switch (notify_op) {
3284 case RBD_NOTIFY_OP_ACQUIRED_LOCK:
3285 rbd_handle_acquired_lock(rbd_dev, struct_v, &p);
3286 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
3288 case RBD_NOTIFY_OP_RELEASED_LOCK:
3289 rbd_handle_released_lock(rbd_dev, struct_v, &p);
3290 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
3292 case RBD_NOTIFY_OP_REQUEST_LOCK:
3293 ret = rbd_handle_request_lock(rbd_dev, struct_v, &p);
3295 rbd_acknowledge_notify_result(rbd_dev, notify_id,
3298 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
3300 case RBD_NOTIFY_OP_HEADER_UPDATE:
3301 ret = rbd_dev_refresh(rbd_dev);
3303 rbd_warn(rbd_dev, "refresh failed: %d", ret);
3305 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
3308 if (rbd_is_lock_owner(rbd_dev))
3309 rbd_acknowledge_notify_result(rbd_dev, notify_id,
3310 cookie, -EOPNOTSUPP);
3312 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
3317 static void __rbd_unregister_watch(struct rbd_device *rbd_dev);
3319 static void rbd_watch_errcb(void *arg, u64 cookie, int err)
3321 struct rbd_device *rbd_dev = arg;
3323 rbd_warn(rbd_dev, "encountered watch error: %d", err);
3325 down_write(&rbd_dev->lock_rwsem);
3326 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
3327 up_write(&rbd_dev->lock_rwsem);
3329 mutex_lock(&rbd_dev->watch_mutex);
3330 if (rbd_dev->watch_state == RBD_WATCH_STATE_REGISTERED) {
3331 __rbd_unregister_watch(rbd_dev);
3332 rbd_dev->watch_state = RBD_WATCH_STATE_ERROR;
3334 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->watch_dwork, 0);
3336 mutex_unlock(&rbd_dev->watch_mutex);
3340 * watch_mutex must be locked
3342 static int __rbd_register_watch(struct rbd_device *rbd_dev)
3344 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3345 struct ceph_osd_linger_request *handle;
3347 rbd_assert(!rbd_dev->watch_handle);
3348 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3350 handle = ceph_osdc_watch(osdc, &rbd_dev->header_oid,
3351 &rbd_dev->header_oloc, rbd_watch_cb,
3352 rbd_watch_errcb, rbd_dev);
3354 return PTR_ERR(handle);
3356 rbd_dev->watch_handle = handle;
3361 * watch_mutex must be locked
3363 static void __rbd_unregister_watch(struct rbd_device *rbd_dev)
3365 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3368 rbd_assert(rbd_dev->watch_handle);
3369 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3371 ret = ceph_osdc_unwatch(osdc, rbd_dev->watch_handle);
3373 rbd_warn(rbd_dev, "failed to unwatch: %d", ret);
3375 rbd_dev->watch_handle = NULL;
3378 static int rbd_register_watch(struct rbd_device *rbd_dev)
3382 mutex_lock(&rbd_dev->watch_mutex);
3383 rbd_assert(rbd_dev->watch_state == RBD_WATCH_STATE_UNREGISTERED);
3384 ret = __rbd_register_watch(rbd_dev);
3388 rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED;
3389 rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id;
3392 mutex_unlock(&rbd_dev->watch_mutex);
3396 static void cancel_tasks_sync(struct rbd_device *rbd_dev)
3398 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3400 cancel_delayed_work_sync(&rbd_dev->watch_dwork);
3401 cancel_work_sync(&rbd_dev->acquired_lock_work);
3402 cancel_work_sync(&rbd_dev->released_lock_work);
3403 cancel_delayed_work_sync(&rbd_dev->lock_dwork);
3404 cancel_work_sync(&rbd_dev->unlock_work);
3407 static void rbd_unregister_watch(struct rbd_device *rbd_dev)
3409 WARN_ON(waitqueue_active(&rbd_dev->lock_waitq));
3410 cancel_tasks_sync(rbd_dev);
3412 mutex_lock(&rbd_dev->watch_mutex);
3413 if (rbd_dev->watch_state == RBD_WATCH_STATE_REGISTERED)
3414 __rbd_unregister_watch(rbd_dev);
3415 rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED;
3416 mutex_unlock(&rbd_dev->watch_mutex);
3418 ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
3422 * lock_rwsem must be held for write
3424 static void rbd_reacquire_lock(struct rbd_device *rbd_dev)
3426 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3430 WARN_ON(rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED);
3432 format_lock_cookie(rbd_dev, cookie);
3433 ret = ceph_cls_set_cookie(osdc, &rbd_dev->header_oid,
3434 &rbd_dev->header_oloc, RBD_LOCK_NAME,
3435 CEPH_CLS_LOCK_EXCLUSIVE, rbd_dev->lock_cookie,
3436 RBD_LOCK_TAG, cookie);
3438 if (ret != -EOPNOTSUPP)
3439 rbd_warn(rbd_dev, "failed to update lock cookie: %d",
3443 * Lock cookie cannot be updated on older OSDs, so do
3444 * a manual release and queue an acquire.
3446 if (rbd_release_lock(rbd_dev))
3447 queue_delayed_work(rbd_dev->task_wq,
3448 &rbd_dev->lock_dwork, 0);
3450 __rbd_lock(rbd_dev, cookie);
3454 static void rbd_reregister_watch(struct work_struct *work)
3456 struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
3457 struct rbd_device, watch_dwork);
3460 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3462 mutex_lock(&rbd_dev->watch_mutex);
3463 if (rbd_dev->watch_state != RBD_WATCH_STATE_ERROR) {
3464 mutex_unlock(&rbd_dev->watch_mutex);
3468 ret = __rbd_register_watch(rbd_dev);
3470 rbd_warn(rbd_dev, "failed to reregister watch: %d", ret);
3471 if (ret == -EBLACKLISTED || ret == -ENOENT) {
3472 set_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags);
3473 wake_requests(rbd_dev, true);
3475 queue_delayed_work(rbd_dev->task_wq,
3476 &rbd_dev->watch_dwork,
3479 mutex_unlock(&rbd_dev->watch_mutex);
3483 rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED;
3484 rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id;
3485 mutex_unlock(&rbd_dev->watch_mutex);
3487 down_write(&rbd_dev->lock_rwsem);
3488 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED)
3489 rbd_reacquire_lock(rbd_dev);
3490 up_write(&rbd_dev->lock_rwsem);
3492 ret = rbd_dev_refresh(rbd_dev);
3494 rbd_warn(rbd_dev, "reregistration refresh failed: %d", ret);
3498 * Synchronous osd object method call. Returns the number of bytes
3499 * returned in the inbound (reply) buffer, or a negative error code.
3501 static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
3502 struct ceph_object_id *oid,
3503 struct ceph_object_locator *oloc,
3504 const char *method_name,
3505 const void *outbound,
3506 size_t outbound_size,
3508 size_t inbound_size)
3510 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3511 struct page *req_page = NULL;
3512 struct page *reply_page;
3516 * Method calls are ultimately read operations. The result
3517 * should be placed into the inbound buffer provided. They
3518 * also supply outbound data -- parameters for the object
3519 * method. Currently, if present, this will be a snapshot id.
3523 if (outbound_size > PAGE_SIZE)
3526 req_page = alloc_page(GFP_KERNEL);
3530 memcpy(page_address(req_page), outbound, outbound_size);
3533 reply_page = alloc_page(GFP_KERNEL);
3536 __free_page(req_page);
3540 ret = ceph_osdc_call(osdc, oid, oloc, RBD_DRV_NAME, method_name,
3541 CEPH_OSD_FLAG_READ, req_page, outbound_size,
3542 reply_page, &inbound_size);
3544 memcpy(inbound, page_address(reply_page), inbound_size);
3549 __free_page(req_page);
3550 __free_page(reply_page);
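/*
 * A typical use is a small fixed-size request/reply pair, treating a
 * short reply as an error, along these lines (hypothetical snippet;
 * the method name and buffer layout here are purely illustrative):
 *
 *	__le64 snapid = cpu_to_le64(CEPH_NOSNAP);
 *	__le64 size_le;
 *	int ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
 *				      &rbd_dev->header_oloc, "get_size",
 *				      &snapid, sizeof(snapid),
 *				      &size_le, sizeof(size_le));
 *	if (ret >= 0 && ret < sizeof(size_le))
 *		ret = -ERANGE;
 */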
3555 * lock_rwsem must be held for read
3557 static int rbd_wait_state_locked(struct rbd_device *rbd_dev, bool may_acquire)
3560 unsigned long timeout;
3563 if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags))
3564 return -EBLACKLISTED;
3566 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED)
3570 rbd_warn(rbd_dev, "exclusive lock required");
3576 * Note the use of mod_delayed_work() in rbd_acquire_lock()
3577 * and cancel_delayed_work() in wake_requests().
3579 dout("%s rbd_dev %p queueing lock_dwork\n", __func__, rbd_dev);
3580 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
3581 prepare_to_wait_exclusive(&rbd_dev->lock_waitq, &wait,
3582 TASK_UNINTERRUPTIBLE);
3583 up_read(&rbd_dev->lock_rwsem);
3584 timeout = schedule_timeout(ceph_timeout_jiffies(
3585 rbd_dev->opts->lock_timeout));
3586 down_read(&rbd_dev->lock_rwsem);
3587 if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) {
3588 ret = -EBLACKLISTED;
3592 rbd_warn(rbd_dev, "timed out waiting for lock");
3596 } while (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED);
3598 finish_wait(&rbd_dev->lock_waitq, &wait);
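/*
 * In short, rbd_wait_state_locked() bails out immediately if the
 * client is blacklisted or acquisition is not allowed; otherwise it
 * kicks lock_dwork and sleeps (dropping lock_rwsem across the wait)
 * until the lock is acquired, the client gets blacklisted, or
 * opts->lock_timeout expires.  It returns with lock_rwsem still held
 * for read in all cases.
 */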
3602 static void rbd_queue_workfn(struct work_struct *work)
3604 struct request *rq = blk_mq_rq_from_pdu(work);
3605 struct rbd_device *rbd_dev = rq->q->queuedata;
3606 struct rbd_img_request *img_request;
3607 struct ceph_snap_context *snapc = NULL;
3608 u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT;
3609 u64 length = blk_rq_bytes(rq);
3610 enum obj_operation_type op_type;
3612 bool must_be_locked;
3615 switch (req_op(rq)) {
3616 case REQ_OP_DISCARD:
3617 case REQ_OP_WRITE_ZEROES:
3618 op_type = OBJ_OP_DISCARD;
3621 op_type = OBJ_OP_WRITE;
3624 op_type = OBJ_OP_READ;
3627 dout("%s: non-fs request type %d\n", __func__, req_op(rq));
3632 /* Ignore/skip any zero-length requests */
3635 dout("%s: zero-length request\n", __func__);
3640 rbd_assert(op_type == OBJ_OP_READ ||
3641 rbd_dev->spec->snap_id == CEPH_NOSNAP);
3644 * Quit early if the mapped snapshot no longer exists. It's
3645 * still possible the snapshot will have disappeared by the
3646 * time our request arrives at the osd, but there's no sense in
3647 * sending it if we already know.
3649 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
3650 dout("request for non-existent snapshot");
3651 rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
3656 if (offset && length > U64_MAX - offset + 1) {
3657 rbd_warn(rbd_dev, "bad request range (%llu~%llu)", offset,
3660 goto err_rq; /* Shouldn't happen */
3663 blk_mq_start_request(rq);
3665 down_read(&rbd_dev->header_rwsem);
3666 mapping_size = rbd_dev->mapping.size;
3667 if (op_type != OBJ_OP_READ) {
3668 snapc = rbd_dev->header.snapc;
3669 ceph_get_snap_context(snapc);
3671 up_read(&rbd_dev->header_rwsem);
3673 if (offset + length > mapping_size) {
3674 rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)", offset,
3675 length, mapping_size);
3681 (rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK) &&
3682 (op_type != OBJ_OP_READ || rbd_dev->opts->lock_on_read);
3683 if (must_be_locked) {
3684 down_read(&rbd_dev->lock_rwsem);
3685 result = rbd_wait_state_locked(rbd_dev,
3686 !rbd_dev->opts->exclusive);
3691 img_request = rbd_img_request_create(rbd_dev, op_type, snapc);
3696 img_request->rq = rq;
3697 snapc = NULL; /* img_request consumes a ref */
3699 if (op_type == OBJ_OP_DISCARD)
3700 result = rbd_img_fill_nodata(img_request, offset, length);
3702 result = rbd_img_fill_from_bio(img_request, offset, length,
3705 goto err_img_request;
3707 rbd_img_request_submit(img_request);
3709 up_read(&rbd_dev->lock_rwsem);
3713 rbd_img_request_put(img_request);
3716 up_read(&rbd_dev->lock_rwsem);
3719 rbd_warn(rbd_dev, "%s %llx at %llx result %d",
3720 obj_op_name(op_type), length, offset, result);
3721 ceph_put_snap_context(snapc);
3723 blk_mq_end_request(rq, errno_to_blk_status(result));
3726 static blk_status_t rbd_queue_rq(struct blk_mq_hw_ctx *hctx,
3727 const struct blk_mq_queue_data *bd)
3729 struct request *rq = bd->rq;
3730 struct work_struct *work = blk_mq_rq_to_pdu(rq);
3732 queue_work(rbd_wq, work);
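/*
 * Request flow, for orientation: blk-mq calls rbd_queue_rq(), which
 * only punts to the work item embedded in the request pdu; the real
 * work (snapshot checks, waiting for the exclusive lock, building and
 * submitting the image request) happens in rbd_queue_workfn() above,
 * in process context where sleeping is allowed.
 */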
3736 static void rbd_free_disk(struct rbd_device *rbd_dev)
3738 blk_cleanup_queue(rbd_dev->disk->queue);
3739 blk_mq_free_tag_set(&rbd_dev->tag_set);
3740 put_disk(rbd_dev->disk);
3741 rbd_dev->disk = NULL;
3744 static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
3745 struct ceph_object_id *oid,
3746 struct ceph_object_locator *oloc,
3747 void *buf, int buf_len)
3750 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3751 struct ceph_osd_request *req;
3752 struct page **pages;
3753 int num_pages = calc_pages_for(0, buf_len);
3756 req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_KERNEL);
3760 ceph_oid_copy(&req->r_base_oid, oid);
3761 ceph_oloc_copy(&req->r_base_oloc, oloc);
3762 req->r_flags = CEPH_OSD_FLAG_READ;
3764 ret = ceph_osdc_alloc_messages(req, GFP_KERNEL);
3768 pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
3769 if (IS_ERR(pages)) {
3770 ret = PTR_ERR(pages);
3774 osd_req_op_extent_init(req, 0, CEPH_OSD_OP_READ, 0, buf_len, 0, 0);
3775 osd_req_op_extent_osd_data_pages(req, 0, pages, buf_len, 0, false,
3778 ceph_osdc_start_request(osdc, req, false);
3779 ret = ceph_osdc_wait_request(osdc, req);
3781 ceph_copy_from_page_vector(pages, buf, 0, ret);
3784 ceph_osdc_put_request(req);
3789 * Read the complete header for the given rbd device. On successful
3790 * return, the rbd_dev->header field will contain up-to-date
3791 * information about the image.
3793 static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
3795 struct rbd_image_header_ondisk *ondisk = NULL;
3802 * The complete header will include an array of its 64-bit
3803 * snapshot ids, followed by the names of those snapshots as
3804 * a contiguous block of NUL-terminated strings. Note that
3805 * the number of snapshots could change by the time we read
3806 * it in, in which case we re-read it.
3813 size = sizeof (*ondisk);
3814 size += snap_count * sizeof (struct rbd_image_snap_ondisk);
3816 ondisk = kmalloc(size, GFP_KERNEL);
3820 ret = rbd_obj_read_sync(rbd_dev, &rbd_dev->header_oid,
3821 &rbd_dev->header_oloc, ondisk, size);
3824 if ((size_t)ret < size) {
3826 rbd_warn(rbd_dev, "short header read (want %zd got %d)",
3830 if (!rbd_dev_ondisk_valid(ondisk)) {
3832 rbd_warn(rbd_dev, "invalid header");
3836 names_size = le64_to_cpu(ondisk->snap_names_len);
3837 want_count = snap_count;
3838 snap_count = le32_to_cpu(ondisk->snap_count);
3839 } while (snap_count != want_count);
3841 ret = rbd_header_from_disk(rbd_dev, ondisk);
3849 * Clear the rbd device's EXISTS flag if the snapshot it's mapped to
3850 * has disappeared from the (just updated) snapshot context.
3852 static void rbd_exists_validate(struct rbd_device *rbd_dev)
3856 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags))
3859 snap_id = rbd_dev->spec->snap_id;