Merge tag 'ceph-for-6.5-rc1' of https://github.com/ceph/ceph-client
authorLinus Torvalds <torvalds@linux-foundation.org>
Fri, 7 Jul 2023 22:07:20 +0000 (15:07 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Fri, 7 Jul 2023 22:07:20 +0000 (15:07 -0700)
Pull ceph updates from Ilya Dryomov:
 "A bunch of CephFS fixups from Xiubo, mostly around dropping caps,
  along with a fix for a regression in the readahead handling code which
  sneaked in with the switch to netfs helpers"

* tag 'ceph-for-6.5-rc1' of https://github.com/ceph/ceph-client:
  ceph: don't let check_caps skip sending responses for revoke msgs
  ceph: issue a cap release immediately if no cap exists
  ceph: trigger to flush the buffer when making snapshot
  ceph: fix blindly expanding the readahead windows
  ceph: add a dedicated private data for netfs rreq
  ceph: voluntarily drop Xx caps for requests that touch parent mtime
  ceph: try to dump the msgs when decoding fails
  ceph: only send metrics when the MDS rank is ready

fs/ceph/addr.c
fs/ceph/caps.c
fs/ceph/dir.c
fs/ceph/file.c
fs/ceph/mds_client.c
fs/ceph/metric.c
fs/ceph/snap.c
fs/ceph/super.h

index 6bb251a4d613ebcf259da89c72f0782d275344d5..59cbfb80edbda9071e2960cb02deb8b6fbf2dae3 100644 (file)
@@ -187,16 +187,42 @@ static void ceph_netfs_expand_readahead(struct netfs_io_request *rreq)
        struct inode *inode = rreq->inode;
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_file_layout *lo = &ci->i_layout;
+       unsigned long max_pages = inode->i_sb->s_bdi->ra_pages;
+       loff_t end = rreq->start + rreq->len, new_end;
+       struct ceph_netfs_request_data *priv = rreq->netfs_priv;
+       unsigned long max_len;
        u32 blockoff;
-       u64 blockno;
 
-       /* Expand the start downward */
-       blockno = div_u64_rem(rreq->start, lo->stripe_unit, &blockoff);
-       rreq->start = blockno * lo->stripe_unit;
-       rreq->len += blockoff;
+       if (priv) {
+               /* Readahead is disabled by posix_fadvise POSIX_FADV_RANDOM */
+               if (priv->file_ra_disabled)
+                       max_pages = 0;
+               else
+                       max_pages = priv->file_ra_pages;
+
+       }
 
-       /* Now, round up the length to the next block */
-       rreq->len = roundup(rreq->len, lo->stripe_unit);
+       /* Readahead is disabled */
+       if (!max_pages)
+               return;
+
+       max_len = max_pages << PAGE_SHIFT;
+
+       /*
+        * Try to expand the length forward by rounding it up to the next
+        * block, but do not exceed the file size, unless the original
+        * request already exceeds it.
+        */
+       new_end = min(round_up(end, lo->stripe_unit), rreq->i_size);
+       if (new_end > end && new_end <= rreq->start + max_len)
+               rreq->len = new_end - rreq->start;
+
+       /* Try to expand the start downward */
+       div_u64_rem(rreq->start, lo->stripe_unit, &blockoff);
+       if (rreq->len + blockoff <= max_len) {
+               rreq->start -= blockoff;
+               rreq->len += blockoff;
+       }
 }
 
 static bool ceph_netfs_clamp_length(struct netfs_io_subrequest *subreq)
@@ -362,18 +388,28 @@ static int ceph_init_request(struct netfs_io_request *rreq, struct file *file)
 {
        struct inode *inode = rreq->inode;
        int got = 0, want = CEPH_CAP_FILE_CACHE;
+       struct ceph_netfs_request_data *priv;
        int ret = 0;
 
        if (rreq->origin != NETFS_READAHEAD)
                return 0;
 
+       priv = kzalloc(sizeof(*priv), GFP_NOFS);
+       if (!priv)
+               return -ENOMEM;
+
        if (file) {
                struct ceph_rw_context *rw_ctx;
                struct ceph_file_info *fi = file->private_data;
 
+               priv->file_ra_pages = file->f_ra.ra_pages;
+               priv->file_ra_disabled = file->f_mode & FMODE_RANDOM;
+
                rw_ctx = ceph_find_rw_context(fi);
-               if (rw_ctx)
+               if (rw_ctx) {
+                       rreq->netfs_priv = priv;
                        return 0;
+               }
        }
 
        /*
@@ -383,27 +419,40 @@ static int ceph_init_request(struct netfs_io_request *rreq, struct file *file)
        ret = ceph_try_get_caps(inode, CEPH_CAP_FILE_RD, want, true, &got);
        if (ret < 0) {
                dout("start_read %p, error getting cap\n", inode);
-               return ret;
+               goto out;
        }
 
        if (!(got & want)) {
                dout("start_read %p, no cache cap\n", inode);
-               return -EACCES;
+               ret = -EACCES;
+               goto out;
+       }
+       if (ret == 0) {
+               ret = -EACCES;
+               goto out;
        }
-       if (ret == 0)
-               return -EACCES;
 
-       rreq->netfs_priv = (void *)(uintptr_t)got;
-       return 0;
+       priv->caps = got;
+       rreq->netfs_priv = priv;
+
+out:
+       if (ret < 0)
+               kfree(priv);
+
+       return ret;
 }
 
 static void ceph_netfs_free_request(struct netfs_io_request *rreq)
 {
-       struct ceph_inode_info *ci = ceph_inode(rreq->inode);
-       int got = (uintptr_t)rreq->netfs_priv;
+       struct ceph_netfs_request_data *priv = rreq->netfs_priv;
+
+       if (!priv)
+               return;
 
-       if (got)
-               ceph_put_cap_refs(ci, got);
+       if (priv->caps)
+               ceph_put_cap_refs(ceph_inode(rreq->inode), priv->caps);
+       kfree(priv);
+       rreq->netfs_priv = NULL;
 }
 
 const struct netfs_request_ops ceph_netfs_ops = {
index 2321e5ddb664de661549baf52c79857ffb7e2e18..e2bb0d0072da5adc215d199765ee21a46d478e1f 100644 (file)
@@ -3109,6 +3109,12 @@ static void __ceph_put_cap_refs(struct ceph_inode_info *ci, int had,
        }
        if (had & CEPH_CAP_FILE_WR) {
                if (--ci->i_wr_ref == 0) {
+                       /*
+                        * The Fb caps will always be taken and released
+                        * together with the Fw caps.
+                        */
+                       WARN_ON_ONCE(ci->i_wb_ref);
+
                        last++;
                        check_flushsnaps = true;
                        if (ci->i_wrbuffer_ref_head == 0 &&
@@ -3560,6 +3566,15 @@ static void handle_cap_grant(struct inode *inode,
        }
        BUG_ON(cap->issued & ~cap->implemented);
 
+       /* don't let check_caps skip sending a response to MDS for revoke msgs */
+       if (le32_to_cpu(grant->op) == CEPH_CAP_OP_REVOKE) {
+               cap->mds_wanted = 0;
+               if (cap == ci->i_auth_cap)
+                       check_caps = 1; /* check auth cap only */
+               else
+                       check_caps = 2; /* check all caps */
+       }
+
        if (extra_info->inline_version > 0 &&
            extra_info->inline_version >= ci->i_inline_version) {
                ci->i_inline_version = extra_info->inline_version;
@@ -4086,6 +4101,7 @@ void ceph_handle_caps(struct ceph_mds_session *session,
        struct cap_extra_info extra_info = {};
        bool queue_trunc;
        bool close_sessions = false;
+       bool do_cap_release = false;
 
        dout("handle_caps from mds%d\n", session->s_mds);
 
@@ -4192,17 +4208,14 @@ void ceph_handle_caps(struct ceph_mds_session *session,
        if (!inode) {
                dout(" i don't have ino %llx\n", vino.ino);
 
-               if (op == CEPH_CAP_OP_IMPORT) {
-                       cap = ceph_get_cap(mdsc, NULL);
-                       cap->cap_ino = vino.ino;
-                       cap->queue_release = 1;
-                       cap->cap_id = le64_to_cpu(h->cap_id);
-                       cap->mseq = mseq;
-                       cap->seq = seq;
-                       cap->issue_seq = seq;
-                       spin_lock(&session->s_cap_lock);
-                       __ceph_queue_cap_release(session, cap);
-                       spin_unlock(&session->s_cap_lock);
+               switch (op) {
+               case CEPH_CAP_OP_IMPORT:
+               case CEPH_CAP_OP_REVOKE:
+               case CEPH_CAP_OP_GRANT:
+                       do_cap_release = true;
+                       break;
+               default:
+                       break;
                }
                goto flush_cap_releases;
        }
@@ -4252,6 +4265,14 @@ void ceph_handle_caps(struct ceph_mds_session *session,
                     inode, ceph_ino(inode), ceph_snap(inode),
                     session->s_mds);
                spin_unlock(&ci->i_ceph_lock);
+               switch (op) {
+               case CEPH_CAP_OP_REVOKE:
+               case CEPH_CAP_OP_GRANT:
+                       do_cap_release = true;
+                       break;
+               default:
+                       break;
+               }
                goto flush_cap_releases;
        }
 
@@ -4302,6 +4323,18 @@ flush_cap_releases:
         * along for the mds (who clearly thinks we still have this
         * cap).
         */
+       if (do_cap_release) {
+               cap = ceph_get_cap(mdsc, NULL);
+               cap->cap_ino = vino.ino;
+               cap->queue_release = 1;
+               cap->cap_id = le64_to_cpu(h->cap_id);
+               cap->mseq = mseq;
+               cap->seq = seq;
+               cap->issue_seq = seq;
+               spin_lock(&session->s_cap_lock);
+               __ceph_queue_cap_release(session, cap);
+               spin_unlock(&session->s_cap_lock);
+       }
        ceph_flush_cap_releases(mdsc, session);
        goto done;
 
index cb67ac821f0ee8740e6bb8b9be39fea07e7a1798..4a2b39d9a61a2038fffa2b1a783d17f03be5c113 100644 (file)
@@ -886,7 +886,8 @@ static int ceph_mknod(struct mnt_idmap *idmap, struct inode *dir,
        set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
        req->r_args.mknod.mode = cpu_to_le32(mode);
        req->r_args.mknod.rdev = cpu_to_le32(rdev);
-       req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL;
+       req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL |
+                            CEPH_CAP_XATTR_EXCL;
        req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
        if (as_ctx.pagelist) {
                req->r_pagelist = as_ctx.pagelist;
@@ -953,7 +954,8 @@ static int ceph_symlink(struct mnt_idmap *idmap, struct inode *dir,
        set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
        req->r_dentry = dget(dentry);
        req->r_num_caps = 2;
-       req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL;
+       req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL |
+                            CEPH_CAP_XATTR_EXCL;
        req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
        if (as_ctx.pagelist) {
                req->r_pagelist = as_ctx.pagelist;
@@ -1022,7 +1024,8 @@ static int ceph_mkdir(struct mnt_idmap *idmap, struct inode *dir,
        ihold(dir);
        set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
        req->r_args.mkdir.mode = cpu_to_le32(mode);
-       req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL;
+       req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL |
+                            CEPH_CAP_XATTR_EXCL;
        req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
        if (as_ctx.pagelist) {
                req->r_pagelist = as_ctx.pagelist;
@@ -1079,7 +1082,7 @@ static int ceph_link(struct dentry *old_dentry, struct inode *dir,
        req->r_parent = dir;
        ihold(dir);
        set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
-       req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
+       req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_XATTR_EXCL;
        req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
        /* release LINK_SHARED on source inode (mds will lock it) */
        req->r_old_inode_drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL;
@@ -1218,7 +1221,7 @@ retry:
        req->r_num_caps = 2;
        req->r_parent = dir;
        ihold(dir);
-       req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
+       req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_XATTR_EXCL;
        req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
        req->r_inode_drop = ceph_drop_caps_for_unlink(inode);
 
@@ -1320,9 +1323,9 @@ static int ceph_rename(struct mnt_idmap *idmap, struct inode *old_dir,
        req->r_parent = new_dir;
        ihold(new_dir);
        set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
-       req->r_old_dentry_drop = CEPH_CAP_FILE_SHARED;
+       req->r_old_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_XATTR_EXCL;
        req->r_old_dentry_unless = CEPH_CAP_FILE_EXCL;
-       req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
+       req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_XATTR_EXCL;
        req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
        /* release LINK_RDCACHE on source inode (mds will lock it) */
        req->r_old_inode_drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL;
index b1925232dc08224e72d153fea15100ef32d182be..63efe5389783ce7aa3d61959188e00fb47e328dc 100644 (file)
@@ -791,7 +791,8 @@ retry:
        if (flags & O_CREAT) {
                struct ceph_file_layout lo;
 
-               req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL;
+               req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL |
+                                    CEPH_CAP_XATTR_EXCL;
                req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
                if (as_ctx.pagelist) {
                        req->r_pagelist = as_ctx.pagelist;
index 4c0f22acf53d294a1c80e93f5c9ee8ad55bb8e3d..66048a86c480ca9bd2075ea753cd013cf4593f1b 100644 (file)
@@ -645,6 +645,7 @@ bad:
        err = -EIO;
 out_bad:
        pr_err("mds parse_reply err %d\n", err);
+       ceph_msg_dump(msg);
        return err;
 }
 
@@ -3538,6 +3539,7 @@ static void handle_forward(struct ceph_mds_client *mdsc,
 
 bad:
        pr_err("mdsc_handle_forward decode error err=%d\n", err);
+       ceph_msg_dump(msg);
 }
 
 static int __decode_session_metadata(void **p, void *end,
@@ -5258,6 +5260,7 @@ void ceph_mdsc_handle_fsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
 bad:
        pr_err("error decoding fsmap %d. Shutting down mount.\n", err);
        ceph_umount_begin(mdsc->fsc->sb);
+       ceph_msg_dump(msg);
 err_out:
        mutex_lock(&mdsc->mutex);
        mdsc->mdsmap_err = err;
@@ -5326,6 +5329,7 @@ bad_unlock:
 bad:
        pr_err("error decoding mdsmap %d. Shutting down mount.\n", err);
        ceph_umount_begin(mdsc->fsc->sb);
+       ceph_msg_dump(msg);
        return;
 }
 
index c47347d2e84e3227d1f2f5383c14a2e0126bc648..cce78d769f5516fbc5ce7b880d64f197a5cafa17 100644 (file)
@@ -36,6 +36,14 @@ static bool ceph_mdsc_send_metrics(struct ceph_mds_client *mdsc,
        s32 items = 0;
        s32 len;
 
+       /* Do not send the metrics until the MDS rank is ready */
+       mutex_lock(&mdsc->mutex);
+       if (ceph_mdsmap_get_state(mdsc->mdsmap, s->s_mds) != CEPH_MDS_STATE_ACTIVE) {
+               mutex_unlock(&mdsc->mutex);
+               return false;
+       }
+       mutex_unlock(&mdsc->mutex);
+
        len = sizeof(*head) + sizeof(*cap) + sizeof(*read) + sizeof(*write)
              + sizeof(*meta) + sizeof(*dlease) + sizeof(*files)
              + sizeof(*icaps) + sizeof(*inodes) + sizeof(*rsize)
index 2e73ba62bd7aa73f6c0f39964d65b5aaf1ff05a2..343d738448dcd9e8c55d5dec2b67eca554334a99 100644 (file)
@@ -675,14 +675,17 @@ int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
                return 0;
        }
 
-       /* Fb cap still in use, delay it */
-       if (ci->i_wb_ref) {
+       /*
+        * Defer flushing the capsnap if the dirty buffer is not flushed yet.
+        * And trigger to flush the buffer immediately.
+        */
+       if (ci->i_wrbuffer_ref) {
                dout("%s %p %llx.%llx cap_snap %p snapc %p %llu %s s=%llu "
                     "used WRBUFFER, delaying\n", __func__, inode,
                     ceph_vinop(inode), capsnap, capsnap->context,
                     capsnap->context->seq, ceph_cap_string(capsnap->dirty),
                     capsnap->size);
-               capsnap->writing = 1;
+               ceph_queue_writeback(inode);
                return 0;
        }
 
index d24bf0db5234673d7fc2d49837071a5c6f3853ea..3bfddf34d488b3216bcbea56a881a20f8311df19 100644 (file)
@@ -451,6 +451,19 @@ struct ceph_inode_info {
        unsigned long  i_work_mask;
 };
 
+struct ceph_netfs_request_data {
+       int caps;
+
+       /*
+        * Maximum size of a file readahead request.
+        * The fadvise could update the bdi's default ra_pages.
+        */
+       unsigned int file_ra_pages;
+
+       /* Set it if fadvise disables file readahead entirely */
+       bool file_ra_disabled;
+};
+
 static inline struct ceph_inode_info *
 ceph_inode(const struct inode *inode)
 {