fs/ceph/mds_client.c
1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/ceph/ceph_debug.h>
3
4 #include <linux/fs.h>
5 #include <linux/wait.h>
6 #include <linux/slab.h>
7 #include <linux/gfp.h>
8 #include <linux/sched.h>
9 #include <linux/debugfs.h>
10 #include <linux/seq_file.h>
11 #include <linux/ratelimit.h>
12
13 #include "super.h"
14 #include "mds_client.h"
15
16 #include <linux/ceph/ceph_features.h>
17 #include <linux/ceph/messenger.h>
18 #include <linux/ceph/decode.h>
19 #include <linux/ceph/pagelist.h>
20 #include <linux/ceph/auth.h>
21 #include <linux/ceph/debugfs.h>
22
23 /*
24  * A cluster of MDS (metadata server) daemons is responsible for
25  * managing the file system namespace (the directory hierarchy and
26  * inodes) and for coordinating shared access to storage.  Metadata is
27  * partitioned hierarchically across a number of servers, and that
28  * partition varies over time as the cluster adjusts the distribution
29  * in order to balance load.
30  *
31  * The MDS client is primarily responsible for managing synchronous
32  * metadata requests for operations like open, unlink, and so forth.
33  * If there is an MDS failure, we find out about it when we (possibly
34  * request and) receive a new MDS map, and can resubmit affected
35  * requests.
36  *
37  * For the most part, though, we take advantage of a lossless
38  * communications channel to the MDS, and do not need to worry about
39  * timing out or resubmitting requests.
40  *
41  * We maintain a stateful "session" with each MDS we interact with.
42  * Within each session, we send periodic heartbeat messages to ensure
43  * any capabilities or leases we have been issued remain valid.  If
44  * the session times out and goes stale, our leases and capabilities
45  * are no longer valid.
46  */
47
48 struct ceph_reconnect_state {
49         int nr_caps;
50         struct ceph_pagelist *pagelist;
51         unsigned msg_version;
52 };
53
54 static void __wake_requests(struct ceph_mds_client *mdsc,
55                             struct list_head *head);
56
57 static const struct ceph_connection_operations mds_con_ops;
58
59
60 /*
61  * mds reply parsing
62  */
63
64 /*
65  * parse individual inode info
66  */
67 static int parse_reply_info_in(void **p, void *end,
68                                struct ceph_mds_reply_info_in *info,
69                                u64 features)
70 {
71         int err = -EIO;
72
73         info->in = *p;
74         *p += sizeof(struct ceph_mds_reply_inode) +
75                 sizeof(*info->in->fragtree.splits) *
76                 le32_to_cpu(info->in->fragtree.nsplits);
77
78         ceph_decode_32_safe(p, end, info->symlink_len, bad);
79         ceph_decode_need(p, end, info->symlink_len, bad);
80         info->symlink = *p;
81         *p += info->symlink_len;
82
83         if (features & CEPH_FEATURE_DIRLAYOUTHASH)
84                 ceph_decode_copy_safe(p, end, &info->dir_layout,
85                                       sizeof(info->dir_layout), bad);
86         else
87                 memset(&info->dir_layout, 0, sizeof(info->dir_layout));
88
89         ceph_decode_32_safe(p, end, info->xattr_len, bad);
90         ceph_decode_need(p, end, info->xattr_len, bad);
91         info->xattr_data = *p;
92         *p += info->xattr_len;
93
94         if (features & CEPH_FEATURE_MDS_INLINE_DATA) {
95                 ceph_decode_64_safe(p, end, info->inline_version, bad);
96                 ceph_decode_32_safe(p, end, info->inline_len, bad);
97                 ceph_decode_need(p, end, info->inline_len, bad);
98                 info->inline_data = *p;
99                 *p += info->inline_len;
100         } else
101                 info->inline_version = CEPH_INLINE_NONE;
102
103         info->pool_ns_len = 0;
104         info->pool_ns_data = NULL;
105         if (features & CEPH_FEATURE_FS_FILE_LAYOUT_V2) {
106                 ceph_decode_32_safe(p, end, info->pool_ns_len, bad);
107                 if (info->pool_ns_len > 0) {
108                         ceph_decode_need(p, end, info->pool_ns_len, bad);
109                         info->pool_ns_data = *p;
110                         *p += info->pool_ns_len;
111                 }
112         }
113
114         return 0;
115 bad:
116         return err;
117 }
118
119 /*
120  * parse a normal reply, which may contain a (dir+)dentry and/or a
121  * target inode.
122  */
123 static int parse_reply_info_trace(void **p, void *end,
124                                   struct ceph_mds_reply_info_parsed *info,
125                                   u64 features)
126 {
127         int err;
128
129         if (info->head->is_dentry) {
130                 err = parse_reply_info_in(p, end, &info->diri, features);
131                 if (err < 0)
132                         goto out_bad;
133
134                 if (unlikely(*p + sizeof(*info->dirfrag) > end))
135                         goto bad;
136                 info->dirfrag = *p;
137                 *p += sizeof(*info->dirfrag) +
138                         sizeof(u32)*le32_to_cpu(info->dirfrag->ndist);
139                 if (unlikely(*p > end))
140                         goto bad;
141
142                 ceph_decode_32_safe(p, end, info->dname_len, bad);
143                 ceph_decode_need(p, end, info->dname_len, bad);
144                 info->dname = *p;
145                 *p += info->dname_len;
146                 info->dlease = *p;
147                 *p += sizeof(*info->dlease);
148         }
149
150         if (info->head->is_target) {
151                 err = parse_reply_info_in(p, end, &info->targeti, features);
152                 if (err < 0)
153                         goto out_bad;
154         }
155
156         if (unlikely(*p != end))
157                 goto bad;
158         return 0;
159
160 bad:
161         err = -EIO;
162 out_bad:
163         pr_err("problem parsing mds trace %d\n", err);
164         return err;
165 }
166
167 /*
168  * parse readdir results
169  */
170 static int parse_reply_info_dir(void **p, void *end,
171                                 struct ceph_mds_reply_info_parsed *info,
172                                 u64 features)
173 {
174         u32 num, i = 0;
175         int err;
176
177         info->dir_dir = *p;
178         if (*p + sizeof(*info->dir_dir) > end)
179                 goto bad;
180         *p += sizeof(*info->dir_dir) +
181                 sizeof(u32)*le32_to_cpu(info->dir_dir->ndist);
182         if (*p > end)
183                 goto bad;
184
185         ceph_decode_need(p, end, sizeof(num) + 2, bad);
186         num = ceph_decode_32(p);
187         {
188                 u16 flags = ceph_decode_16(p);
189                 info->dir_end = !!(flags & CEPH_READDIR_FRAG_END);
190                 info->dir_complete = !!(flags & CEPH_READDIR_FRAG_COMPLETE);
191                 info->hash_order = !!(flags & CEPH_READDIR_HASH_ORDER);
192                 info->offset_hash = !!(flags & CEPH_READDIR_OFFSET_HASH);
193         }
194         if (num == 0)
195                 goto done;
196
197         BUG_ON(!info->dir_entries);
198         if ((unsigned long)(info->dir_entries + num) >
199             (unsigned long)info->dir_entries + info->dir_buf_size) {
200                 pr_err("dir contents are larger than expected\n");
201                 WARN_ON(1);
202                 goto bad;
203         }
204
205         info->dir_nr = num;
206         while (num) {
207                 struct ceph_mds_reply_dir_entry *rde = info->dir_entries + i;
208                 /* dentry */
209                 ceph_decode_need(p, end, sizeof(u32)*2, bad);
210                 rde->name_len = ceph_decode_32(p);
211                 ceph_decode_need(p, end, rde->name_len, bad);
212                 rde->name = *p;
213                 *p += rde->name_len;
214                 dout("parsed dir dname '%.*s'\n", rde->name_len, rde->name);
215                 rde->lease = *p;
216                 *p += sizeof(struct ceph_mds_reply_lease);
217
218                 /* inode */
219                 err = parse_reply_info_in(p, end, &rde->inode, features);
220                 if (err < 0)
221                         goto out_bad;
222                 /* ceph_readdir_prepopulate() will update it */
223                 rde->offset = 0;
224                 i++;
225                 num--;
226         }
227
228 done:
229         if (*p != end)
230                 goto bad;
231         return 0;
232
233 bad:
234         err = -EIO;
235 out_bad:
236         pr_err("problem parsing dir contents %d\n", err);
237         return err;
238 }
239
240 /*
241  * parse fcntl F_GETLK results
242  */
243 static int parse_reply_info_filelock(void **p, void *end,
244                                      struct ceph_mds_reply_info_parsed *info,
245                                      u64 features)
246 {
247         if (*p + sizeof(*info->filelock_reply) > end)
248                 goto bad;
249
250         info->filelock_reply = *p;
251         *p += sizeof(*info->filelock_reply);
252
253         if (unlikely(*p != end))
254                 goto bad;
255         return 0;
256
257 bad:
258         return -EIO;
259 }
260
261 /*
262  * parse create results
263  */
264 static int parse_reply_info_create(void **p, void *end,
265                                   struct ceph_mds_reply_info_parsed *info,
266                                   u64 features)
267 {
268         if (features & CEPH_FEATURE_REPLY_CREATE_INODE) {
269                 if (*p == end) {
270                         info->has_create_ino = false;
271                 } else {
272                         info->has_create_ino = true;
273                         info->ino = ceph_decode_64(p);
274                 }
275         }
276
277         if (unlikely(*p != end))
278                 goto bad;
279         return 0;
280
281 bad:
282         return -EIO;
283 }
284
285 /*
286  * parse extra results
287  */
288 static int parse_reply_info_extra(void **p, void *end,
289                                   struct ceph_mds_reply_info_parsed *info,
290                                   u64 features)
291 {
292         u32 op = le32_to_cpu(info->head->op);
293
294         if (op == CEPH_MDS_OP_GETFILELOCK)
295                 return parse_reply_info_filelock(p, end, info, features);
296         else if (op == CEPH_MDS_OP_READDIR || op == CEPH_MDS_OP_LSSNAP)
297                 return parse_reply_info_dir(p, end, info, features);
298         else if (op == CEPH_MDS_OP_CREATE)
299                 return parse_reply_info_create(p, end, info, features);
300         else
301                 return -EIO;
302 }
303
304 /*
305  * parse entire mds reply
306  */
307 static int parse_reply_info(struct ceph_msg *msg,
308                             struct ceph_mds_reply_info_parsed *info,
309                             u64 features)
310 {
311         void *p, *end;
312         u32 len;
313         int err;
314
315         info->head = msg->front.iov_base;
316         p = msg->front.iov_base + sizeof(struct ceph_mds_reply_head);
317         end = p + msg->front.iov_len - sizeof(struct ceph_mds_reply_head);
318
319         /* trace */
320         ceph_decode_32_safe(&p, end, len, bad);
321         if (len > 0) {
322                 ceph_decode_need(&p, end, len, bad);
323                 err = parse_reply_info_trace(&p, p+len, info, features);
324                 if (err < 0)
325                         goto out_bad;
326         }
327
328         /* extra */
329         ceph_decode_32_safe(&p, end, len, bad);
330         if (len > 0) {
331                 ceph_decode_need(&p, end, len, bad);
332                 err = parse_reply_info_extra(&p, p+len, info, features);
333                 if (err < 0)
334                         goto out_bad;
335         }
336
337         /* snap blob */
338         ceph_decode_32_safe(&p, end, len, bad);
339         info->snapblob_len = len;
340         info->snapblob = p;
341         p += len;
342
343         if (p != end)
344                 goto bad;
345         return 0;
346
347 bad:
348         err = -EIO;
349 out_bad:
350         pr_err("mds parse_reply err %d\n", err);
351         return err;
352 }
353
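/*
 * Free the page buffer that was allocated for parsed readdir entries,
 * if any.
 */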
354 static void destroy_reply_info(struct ceph_mds_reply_info_parsed *info)
355 {
356         if (!info->dir_entries)
357                 return;
358         free_pages((unsigned long)info->dir_entries, get_order(info->dir_buf_size));
359 }
360
361
362 /*
363  * sessions
364  */
365 const char *ceph_session_state_name(int s)
366 {
367         switch (s) {
368         case CEPH_MDS_SESSION_NEW: return "new";
369         case CEPH_MDS_SESSION_OPENING: return "opening";
370         case CEPH_MDS_SESSION_OPEN: return "open";
371         case CEPH_MDS_SESSION_HUNG: return "hung";
372         case CEPH_MDS_SESSION_CLOSING: return "closing";
373         case CEPH_MDS_SESSION_RESTARTING: return "restarting";
374         case CEPH_MDS_SESSION_RECONNECTING: return "reconnecting";
375         case CEPH_MDS_SESSION_REJECTED: return "rejected";
376         default: return "???";
377         }
378 }
379
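/*
 * Take an extra reference on a session, unless its refcount has already
 * dropped to zero (i.e. the session is being torn down).
 */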
380 static struct ceph_mds_session *get_session(struct ceph_mds_session *s)
381 {
382         if (refcount_inc_not_zero(&s->s_ref)) {
383                 dout("mdsc get_session %p %d -> %d\n", s,
384                      refcount_read(&s->s_ref)-1, refcount_read(&s->s_ref));
385                 return s;
386         } else {
387                 dout("mdsc get_session %p 0 -- FAIL", s);
388                 return NULL;
389         }
390 }
391
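/*
 * Drop a session reference; when the last reference goes away, destroy
 * the authorizer and free the session.
 */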
392 void ceph_put_mds_session(struct ceph_mds_session *s)
393 {
394         dout("mdsc put_session %p %d -> %d\n", s,
395              refcount_read(&s->s_ref), refcount_read(&s->s_ref)-1);
396         if (refcount_dec_and_test(&s->s_ref)) {
397                 if (s->s_auth.authorizer)
398                         ceph_auth_destroy_authorizer(s->s_auth.authorizer);
399                 kfree(s);
400         }
401 }
402
403 /*
404  * called under mdsc->mutex
405  */
406 struct ceph_mds_session *__ceph_lookup_mds_session(struct ceph_mds_client *mdsc,
407                                                    int mds)
408 {
409         struct ceph_mds_session *session;
410
411         if (mds >= mdsc->max_sessions || !mdsc->sessions[mds])
412                 return NULL;
413         session = mdsc->sessions[mds];
414         dout("lookup_mds_session %p %d\n", session,
415              refcount_read(&session->s_ref));
416         get_session(session);
417         return session;
418 }
419
420 static bool __have_session(struct ceph_mds_client *mdsc, int mds)
421 {
422         if (mds >= mdsc->max_sessions)
423                 return false;
424         return mdsc->sessions[mds];
425 }
426
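/*
 * Check that a session is still registered in mdsc->sessions[].
 * Returns 0 if so, -ENOENT otherwise.  Called under mdsc->mutex.
 */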
427 static int __verify_registered_session(struct ceph_mds_client *mdsc,
428                                        struct ceph_mds_session *s)
429 {
430         if (s->s_mds >= mdsc->max_sessions ||
431             mdsc->sessions[s->s_mds] != s)
432                 return -ENOENT;
433         return 0;
434 }
435
436 /*
437  * create+register a new session for given mds.
438  * called under mdsc->mutex.
439  */
440 static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
441                                                  int mds)
442 {
443         struct ceph_mds_session *s;
444
445         if (mds >= mdsc->mdsmap->m_num_mds)
446                 return ERR_PTR(-EINVAL);
447
448         s = kzalloc(sizeof(*s), GFP_NOFS);
449         if (!s)
450                 return ERR_PTR(-ENOMEM);
451         s->s_mdsc = mdsc;
452         s->s_mds = mds;
453         s->s_state = CEPH_MDS_SESSION_NEW;
454         s->s_ttl = 0;
455         s->s_seq = 0;
456         mutex_init(&s->s_mutex);
457
458         ceph_con_init(&s->s_con, s, &mds_con_ops, &mdsc->fsc->client->msgr);
459
460         spin_lock_init(&s->s_gen_ttl_lock);
461         s->s_cap_gen = 0;
462         s->s_cap_ttl = jiffies - 1;
463
464         spin_lock_init(&s->s_cap_lock);
465         s->s_renew_requested = 0;
466         s->s_renew_seq = 0;
467         INIT_LIST_HEAD(&s->s_caps);
468         s->s_nr_caps = 0;
469         s->s_trim_caps = 0;
470         refcount_set(&s->s_ref, 1);
471         INIT_LIST_HEAD(&s->s_waiting);
472         INIT_LIST_HEAD(&s->s_unsafe);
473         s->s_num_cap_releases = 0;
474         s->s_cap_reconnect = 0;
475         s->s_cap_iterator = NULL;
476         INIT_LIST_HEAD(&s->s_cap_releases);
477         INIT_LIST_HEAD(&s->s_cap_flushing);
478
479         dout("register_session mds%d\n", mds);
480         if (mds >= mdsc->max_sessions) {
481                 int newmax = 1 << get_count_order(mds+1);
482                 struct ceph_mds_session **sa;
483
484                 dout("register_session realloc to %d\n", newmax);
485                 sa = kcalloc(newmax, sizeof(void *), GFP_NOFS);
486                 if (!sa)
487                         goto fail_realloc;
488                 if (mdsc->sessions) {
489                         memcpy(sa, mdsc->sessions,
490                                mdsc->max_sessions * sizeof(void *));
491                         kfree(mdsc->sessions);
492                 }
493                 mdsc->sessions = sa;
494                 mdsc->max_sessions = newmax;
495         }
496         mdsc->sessions[mds] = s;
497         atomic_inc(&mdsc->num_sessions);
498         refcount_inc(&s->s_ref);  /* one ref to sessions[], one to caller */
499
500         ceph_con_open(&s->s_con, CEPH_ENTITY_TYPE_MDS, mds,
501                       ceph_mdsmap_get_addr(mdsc->mdsmap, mds));
502
503         return s;
504
505 fail_realloc:
506         kfree(s);
507         return ERR_PTR(-ENOMEM);
508 }
509
510 /*
511  * called under mdsc->mutex
512  */
513 static void __unregister_session(struct ceph_mds_client *mdsc,
514                                struct ceph_mds_session *s)
515 {
516         dout("__unregister_session mds%d %p\n", s->s_mds, s);
517         BUG_ON(mdsc->sessions[s->s_mds] != s);
518         mdsc->sessions[s->s_mds] = NULL;
519         ceph_con_close(&s->s_con);
520         ceph_put_mds_session(s);
521         atomic_dec(&mdsc->num_sessions);
522 }
523
524 /*
525  * drop session refs in request.
526  *
527  * should be last request ref, or hold mdsc->mutex
528  */
529 static void put_request_session(struct ceph_mds_request *req)
530 {
531         if (req->r_session) {
532                 ceph_put_mds_session(req->r_session);
533                 req->r_session = NULL;
534         }
535 }
536
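/*
 * Final cleanup once the last reference to a request is dropped:
 * release reply info, messages, inode/dentry references and cap pins,
 * paths, the pagelist, the session ref and the cap reservation.
 */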
537 void ceph_mdsc_release_request(struct kref *kref)
538 {
539         struct ceph_mds_request *req = container_of(kref,
540                                                     struct ceph_mds_request,
541                                                     r_kref);
542         destroy_reply_info(&req->r_reply_info);
543         if (req->r_request)
544                 ceph_msg_put(req->r_request);
545         if (req->r_reply)
546                 ceph_msg_put(req->r_reply);
547         if (req->r_inode) {
548                 ceph_put_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
549                 iput(req->r_inode);
550         }
551         if (req->r_parent)
552                 ceph_put_cap_refs(ceph_inode(req->r_parent), CEPH_CAP_PIN);
553         iput(req->r_target_inode);
554         if (req->r_dentry)
555                 dput(req->r_dentry);
556         if (req->r_old_dentry)
557                 dput(req->r_old_dentry);
558         if (req->r_old_dentry_dir) {
559                 /*
560                  * track (and drop pins for) r_old_dentry_dir
561                  * separately, since r_old_dentry's d_parent may have
562                  * changed between the dir mutex being dropped and
563                  * this request being freed.
564                  */
565                 ceph_put_cap_refs(ceph_inode(req->r_old_dentry_dir),
566                                   CEPH_CAP_PIN);
567                 iput(req->r_old_dentry_dir);
568         }
569         kfree(req->r_path1);
570         kfree(req->r_path2);
571         if (req->r_pagelist)
572                 ceph_pagelist_release(req->r_pagelist);
573         put_request_session(req);
574         ceph_unreserve_caps(req->r_mdsc, &req->r_caps_reservation);
575         kfree(req);
576 }
577
578 DEFINE_RB_FUNCS(request, struct ceph_mds_request, r_tid, r_node)
579
580 /*
581  * lookup request, bump ref if found.
582  *
583  * called under mdsc->mutex.
584  */
585 static struct ceph_mds_request *
586 lookup_get_request(struct ceph_mds_client *mdsc, u64 tid)
587 {
588         struct ceph_mds_request *req;
589
590         req = lookup_request(&mdsc->request_tree, tid);
591         if (req)
592                 ceph_mdsc_get_request(req);
593
594         return req;
595 }
596
597 /*
598  * Register an in-flight request, and assign a tid.  Link to directory
599  * we are modifying (if any).
600  *
601  * Called under mdsc->mutex.
602  */
603 static void __register_request(struct ceph_mds_client *mdsc,
604                                struct ceph_mds_request *req,
605                                struct inode *dir)
606 {
607         int ret = 0;
608
609         req->r_tid = ++mdsc->last_tid;
610         if (req->r_num_caps) {
611                 ret = ceph_reserve_caps(mdsc, &req->r_caps_reservation,
612                                         req->r_num_caps);
613                 if (ret < 0) {
614                         pr_err("__register_request %p "
615                                "failed to reserve caps: %d\n", req, ret);
616                         /* set req->r_err to fail early from __do_request */
617                         req->r_err = ret;
618                         return;
619                 }
620         }
621         dout("__register_request %p tid %lld\n", req, req->r_tid);
622         ceph_mdsc_get_request(req);
623         insert_request(&mdsc->request_tree, req);
624
625         req->r_uid = current_fsuid();
626         req->r_gid = current_fsgid();
627
628         if (mdsc->oldest_tid == 0 && req->r_op != CEPH_MDS_OP_SETFILELOCK)
629                 mdsc->oldest_tid = req->r_tid;
630
631         if (dir) {
632                 ihold(dir);
633                 req->r_unsafe_dir = dir;
634         }
635 }
636
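/*
 * Remove a request from the tid tree, drop its unsafe-list linkage and
 * directory reference, and put the registration reference.
 *
 * Called under mdsc->mutex.
 */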
637 static void __unregister_request(struct ceph_mds_client *mdsc,
638                                  struct ceph_mds_request *req)
639 {
640         dout("__unregister_request %p tid %lld\n", req, req->r_tid);
641
642         /* Never leave an unregistered request on an unsafe list! */
643         list_del_init(&req->r_unsafe_item);
644
645         if (req->r_tid == mdsc->oldest_tid) {
646                 struct rb_node *p = rb_next(&req->r_node);
647                 mdsc->oldest_tid = 0;
648                 while (p) {
649                         struct ceph_mds_request *next_req =
650                                 rb_entry(p, struct ceph_mds_request, r_node);
651                         if (next_req->r_op != CEPH_MDS_OP_SETFILELOCK) {
652                                 mdsc->oldest_tid = next_req->r_tid;
653                                 break;
654                         }
655                         p = rb_next(p);
656                 }
657         }
658
659         erase_request(&mdsc->request_tree, req);
660
661         if (req->r_unsafe_dir  &&
662             test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
663                 struct ceph_inode_info *ci = ceph_inode(req->r_unsafe_dir);
664                 spin_lock(&ci->i_unsafe_lock);
665                 list_del_init(&req->r_unsafe_dir_item);
666                 spin_unlock(&ci->i_unsafe_lock);
667         }
668         if (req->r_target_inode &&
669             test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
670                 struct ceph_inode_info *ci = ceph_inode(req->r_target_inode);
671                 spin_lock(&ci->i_unsafe_lock);
672                 list_del_init(&req->r_unsafe_target_item);
673                 spin_unlock(&ci->i_unsafe_lock);
674         }
675
676         if (req->r_unsafe_dir) {
677                 iput(req->r_unsafe_dir);
678                 req->r_unsafe_dir = NULL;
679         }
680
681         complete_all(&req->r_safe_completion);
682
683         ceph_mdsc_put_request(req);
684 }
685
686 /*
687  * Walk back up the dentry tree until we hit a dentry representing a
688  * non-snapshot inode. We do this using the rcu_read_lock (which must be held
689  * when calling this) to ensure that the objects won't disappear while we're
690  * working with them. Once we hit a candidate dentry, we attempt to take a
691  * reference to it, and return that as the result.
692  */
693 static struct inode *get_nonsnap_parent(struct dentry *dentry)
694 {
695         struct inode *inode = NULL;
696
697         while (dentry && !IS_ROOT(dentry)) {
698                 inode = d_inode_rcu(dentry);
699                 if (!inode || ceph_snap(inode) == CEPH_NOSNAP)
700                         break;
701                 dentry = dentry->d_parent;
702         }
703         if (inode)
704                 inode = igrab(inode);
705         return inode;
706 }
707
708 /*
709  * Choose mds to send request to next.  If there is a hint set in the
710  * request (e.g., due to a prior forward hint from the mds), use that.
711  * Otherwise, consult frag tree and/or caps to identify the
712  * appropriate mds.  If all else fails, choose randomly.
713  *
714  * Called under mdsc->mutex.
715  */
716 static int __choose_mds(struct ceph_mds_client *mdsc,
717                         struct ceph_mds_request *req)
718 {
719         struct inode *inode;
720         struct ceph_inode_info *ci;
721         struct ceph_cap *cap;
722         int mode = req->r_direct_mode;
723         int mds = -1;
724         u32 hash = req->r_direct_hash;
725         bool is_hash = test_bit(CEPH_MDS_R_DIRECT_IS_HASH, &req->r_req_flags);
726
727         /*
728          * is there a specific mds we should try?  ignore hint if we have
729          * no session and the mds is not up (active or recovering).
730          */
731         if (req->r_resend_mds >= 0 &&
732             (__have_session(mdsc, req->r_resend_mds) ||
733              ceph_mdsmap_get_state(mdsc->mdsmap, req->r_resend_mds) > 0)) {
734                 dout("choose_mds using resend_mds mds%d\n",
735                      req->r_resend_mds);
736                 return req->r_resend_mds;
737         }
738
739         if (mode == USE_RANDOM_MDS)
740                 goto random;
741
742         inode = NULL;
743         if (req->r_inode) {
744                 if (ceph_snap(req->r_inode) != CEPH_SNAPDIR) {
745                         inode = req->r_inode;
746                         ihold(inode);
747                 } else {
748                         /* req->r_dentry is non-null for LSSNAP request */
749                         rcu_read_lock();
750                         inode = get_nonsnap_parent(req->r_dentry);
751                         rcu_read_unlock();
752                         dout("__choose_mds using snapdir's parent %p\n", inode);
753                 }
754         } else if (req->r_dentry) {
755                 /* ignore race with rename; old or new d_parent is okay */
756                 struct dentry *parent;
757                 struct inode *dir;
758
759                 rcu_read_lock();
760                 parent = req->r_dentry->d_parent;
761                 dir = req->r_parent ? : d_inode_rcu(parent);
762
763                 if (!dir || dir->i_sb != mdsc->fsc->sb) {
764                         /*  not this fs or parent went negative */
765                         inode = d_inode(req->r_dentry);
766                         if (inode)
767                                 ihold(inode);
768                 } else if (ceph_snap(dir) != CEPH_NOSNAP) {
769                         /* direct snapped/virtual snapdir requests
770                          * based on parent dir inode */
771                         inode = get_nonsnap_parent(parent);
772                         dout("__choose_mds using nonsnap parent %p\n", inode);
773                 } else {
774                         /* dentry target */
775                         inode = d_inode(req->r_dentry);
776                         if (!inode || mode == USE_AUTH_MDS) {
777                                 /* dir + name */
778                                 inode = igrab(dir);
779                                 hash = ceph_dentry_hash(dir, req->r_dentry);
780                                 is_hash = true;
781                         } else {
782                                 ihold(inode);
783                         }
784                 }
785                 rcu_read_unlock();
786         }
787
788         dout("__choose_mds %p is_hash=%d (%d) mode %d\n", inode, (int)is_hash,
789              (int)hash, mode);
790         if (!inode)
791                 goto random;
792         ci = ceph_inode(inode);
793
794         if (is_hash && S_ISDIR(inode->i_mode)) {
795                 struct ceph_inode_frag frag;
796                 int found;
797
798                 ceph_choose_frag(ci, hash, &frag, &found);
799                 if (found) {
800                         if (mode == USE_ANY_MDS && frag.ndist > 0) {
801                                 u8 r;
802
803                                 /* choose a random replica */
804                                 get_random_bytes(&r, 1);
805                                 r %= frag.ndist;
806                                 mds = frag.dist[r];
807                                 dout("choose_mds %p %llx.%llx "
808                                      "frag %u mds%d (%d/%d)\n",
809                                      inode, ceph_vinop(inode),
810                                      frag.frag, mds,
811                                      (int)r, frag.ndist);
812                                 if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >=
813                                     CEPH_MDS_STATE_ACTIVE)
814                                         goto out;
815                         }
816
817                         /* since this file/dir wasn't known to be
818                          * replicated, we want to look for the
819                          * authoritative mds. */
820                         mode = USE_AUTH_MDS;
821                         if (frag.mds >= 0) {
822                                 /* choose auth mds */
823                                 mds = frag.mds;
824                                 dout("choose_mds %p %llx.%llx "
825                                      "frag %u mds%d (auth)\n",
826                                      inode, ceph_vinop(inode), frag.frag, mds);
827                                 if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >=
828                                     CEPH_MDS_STATE_ACTIVE)
829                                         goto out;
830                         }
831                 }
832         }
833
834         spin_lock(&ci->i_ceph_lock);
835         cap = NULL;
836         if (mode == USE_AUTH_MDS)
837                 cap = ci->i_auth_cap;
838         if (!cap && !RB_EMPTY_ROOT(&ci->i_caps))
839                 cap = rb_entry(rb_first(&ci->i_caps), struct ceph_cap, ci_node);
840         if (!cap) {
841                 spin_unlock(&ci->i_ceph_lock);
842                 iput(inode);
843                 goto random;
844         }
845         mds = cap->session->s_mds;
846         dout("choose_mds %p %llx.%llx mds%d (%scap %p)\n",
847              inode, ceph_vinop(inode), mds,
848              cap == ci->i_auth_cap ? "auth " : "", cap);
849         spin_unlock(&ci->i_ceph_lock);
850 out:
851         iput(inode);
852         return mds;
853
854 random:
855         mds = ceph_mdsmap_get_random_mds(mdsc->mdsmap);
856         dout("choose_mds chose random mds%d\n", mds);
857         return mds;
858 }
859
860
861 /*
862  * session messages
863  */
864 static struct ceph_msg *create_session_msg(u32 op, u64 seq)
865 {
866         struct ceph_msg *msg;
867         struct ceph_mds_session_head *h;
868
869         msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h), GFP_NOFS,
870                            false);
871         if (!msg) {
872                 pr_err("create_session_msg ENOMEM creating msg\n");
873                 return NULL;
874         }
875         h = msg->front.iov_base;
876         h->op = cpu_to_le32(op);
877         h->seq = cpu_to_le64(seq);
878
879         return msg;
880 }
881
882 /*
883  * session message, specialization for CEPH_SESSION_REQUEST_OPEN
884  * to include additional client metadata fields.
885  */
886 static struct ceph_msg *create_session_open_msg(struct ceph_mds_client *mdsc, u64 seq)
887 {
888         struct ceph_msg *msg;
889         struct ceph_mds_session_head *h;
890         int i = -1;
891         int metadata_bytes = 0;
892         int metadata_key_count = 0;
893         struct ceph_options *opt = mdsc->fsc->client->options;
894         struct ceph_mount_options *fsopt = mdsc->fsc->mount_options;
895         void *p;
896
897         const char* metadata[][2] = {
898                 {"hostname", mdsc->nodename},
899                 {"kernel_version", init_utsname()->release},
900                 {"entity_id", opt->name ? : ""},
901                 {"root", fsopt->server_path ? : "/"},
902                 {NULL, NULL}
903         };
904
905         /* Calculate serialized length of metadata */
906         metadata_bytes = 4;  /* map length */
907         for (i = 0; metadata[i][0]; ++i) {
908                 metadata_bytes += 8 + strlen(metadata[i][0]) +
909                         strlen(metadata[i][1]);
910                 metadata_key_count++;
911         }
912
913         /* Allocate the message */
914         msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h) + metadata_bytes,
915                            GFP_NOFS, false);
916         if (!msg) {
917                 pr_err("create_session_msg ENOMEM creating msg\n");
918                 return NULL;
919         }
920         h = msg->front.iov_base;
921         h->op = cpu_to_le32(CEPH_SESSION_REQUEST_OPEN);
922         h->seq = cpu_to_le64(seq);
923
924         /*
925          * Serialize client metadata into waiting buffer space, using
926          * the format that userspace expects for map<string, string>
927          *
928          * ClientSession messages with metadata are v2
929          */
930         msg->hdr.version = cpu_to_le16(2);
931         msg->hdr.compat_version = cpu_to_le16(1);
932
933         /* The write pointer, following the session_head structure */
934         p = msg->front.iov_base + sizeof(*h);
935
936         /* Number of entries in the map */
937         ceph_encode_32(&p, metadata_key_count);
938
939         /* Two length-prefixed strings for each entry in the map */
940         for (i = 0; metadata[i][0]; ++i) {
941                 size_t const key_len = strlen(metadata[i][0]);
942                 size_t const val_len = strlen(metadata[i][1]);
943
944                 ceph_encode_32(&p, key_len);
945                 memcpy(p, metadata[i][0], key_len);
946                 p += key_len;
947                 ceph_encode_32(&p, val_len);
948                 memcpy(p, metadata[i][1], val_len);
949                 p += val_len;
950         }
951
952         return msg;
953 }
954
955 /*
956  * send session open request.
957  *
958  * called under mdsc->mutex
959  */
960 static int __open_session(struct ceph_mds_client *mdsc,
961                           struct ceph_mds_session *session)
962 {
963         struct ceph_msg *msg;
964         int mstate;
965         int mds = session->s_mds;
966
967         /* wait for mds to go active? */
968         mstate = ceph_mdsmap_get_state(mdsc->mdsmap, mds);
969         dout("open_session to mds%d (%s)\n", mds,
970              ceph_mds_state_name(mstate));
971         session->s_state = CEPH_MDS_SESSION_OPENING;
972         session->s_renew_requested = jiffies;
973
974         /* send connect message */
975         msg = create_session_open_msg(mdsc, session->s_seq);
976         if (!msg)
977                 return -ENOMEM;
978         ceph_con_send(&session->s_con, msg);
979         return 0;
980 }
981
982 /*
983  * open sessions for any export targets for the given mds
984  *
985  * called under mdsc->mutex
986  */
987 static struct ceph_mds_session *
988 __open_export_target_session(struct ceph_mds_client *mdsc, int target)
989 {
990         struct ceph_mds_session *session;
991
992         session = __ceph_lookup_mds_session(mdsc, target);
993         if (!session) {
994                 session = register_session(mdsc, target);
995                 if (IS_ERR(session))
996                         return session;
997         }
998         if (session->s_state == CEPH_MDS_SESSION_NEW ||
999             session->s_state == CEPH_MDS_SESSION_CLOSING)
1000                 __open_session(mdsc, session);
1001
1002         return session;
1003 }
1004
1005 struct ceph_mds_session *
1006 ceph_mdsc_open_export_target_session(struct ceph_mds_client *mdsc, int target)
1007 {
1008         struct ceph_mds_session *session;
1009
1010         dout("open_export_target_session to mds%d\n", target);
1011
1012         mutex_lock(&mdsc->mutex);
1013         session = __open_export_target_session(mdsc, target);
1014         mutex_unlock(&mdsc->mutex);
1015
1016         return session;
1017 }
1018
1019 static void __open_export_target_sessions(struct ceph_mds_client *mdsc,
1020                                           struct ceph_mds_session *session)
1021 {
1022         struct ceph_mds_info *mi;
1023         struct ceph_mds_session *ts;
1024         int i, mds = session->s_mds;
1025
1026         if (mds >= mdsc->mdsmap->m_num_mds)
1027                 return;
1028
1029         mi = &mdsc->mdsmap->m_info[mds];
1030         dout("open_export_target_sessions for mds%d (%d targets)\n",
1031              session->s_mds, mi->num_export_targets);
1032
1033         for (i = 0; i < mi->num_export_targets; i++) {
1034                 ts = __open_export_target_session(mdsc, mi->export_targets[i]);
1035                 if (!IS_ERR(ts))
1036                         ceph_put_mds_session(ts);
1037         }
1038 }
1039
1040 void ceph_mdsc_open_export_target_sessions(struct ceph_mds_client *mdsc,
1041                                            struct ceph_mds_session *session)
1042 {
1043         mutex_lock(&mdsc->mutex);
1044         __open_export_target_sessions(mdsc, session);
1045         mutex_unlock(&mdsc->mutex);
1046 }
1047
1048 /*
1049  * session caps
1050  */
1051
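/*
 * Move any queued cap releases off the session onto a private list.
 * Called with s_cap_lock held; the detached caps can then be disposed
 * of once the lock is dropped.
 */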
1052 static void detach_cap_releases(struct ceph_mds_session *session,
1053                                 struct list_head *target)
1054 {
1055         lockdep_assert_held(&session->s_cap_lock);
1056
1057         list_splice_init(&session->s_cap_releases, target);
1058         session->s_num_cap_releases = 0;
1059         dout("detach_cap_releases mds%d\n", session->s_mds);
1060 }
1061
1062 static void dispose_cap_releases(struct ceph_mds_client *mdsc,
1063                                  struct list_head *dispose)
1064 {
1065         while (!list_empty(dispose)) {
1066                 struct ceph_cap *cap;
1067                 /* zero out the in-progress message */
1068                 cap = list_first_entry(dispose, struct ceph_cap, session_caps);
1069                 list_del(&cap->session_caps);
1070                 ceph_put_cap(mdsc, cap);
1071         }
1072 }
1073
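/*
 * Drop any unsafe requests still queued on a session, and zero
 * r_attempts on its requests so that kick_requests() will resend them.
 */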
1074 static void cleanup_session_requests(struct ceph_mds_client *mdsc,
1075                                      struct ceph_mds_session *session)
1076 {
1077         struct ceph_mds_request *req;
1078         struct rb_node *p;
1079
1080         dout("cleanup_session_requests mds%d\n", session->s_mds);
1081         mutex_lock(&mdsc->mutex);
1082         while (!list_empty(&session->s_unsafe)) {
1083                 req = list_first_entry(&session->s_unsafe,
1084                                        struct ceph_mds_request, r_unsafe_item);
1085                 pr_warn_ratelimited(" dropping unsafe request %llu\n",
1086                                     req->r_tid);
1087                 __unregister_request(mdsc, req);
1088         }
1089         /* zero r_attempts, so kick_requests() will re-send requests */
1090         p = rb_first(&mdsc->request_tree);
1091         while (p) {
1092                 req = rb_entry(p, struct ceph_mds_request, r_node);
1093                 p = rb_next(p);
1094                 if (req->r_session &&
1095                     req->r_session->s_mds == session->s_mds)
1096                         req->r_attempts = 0;
1097         }
1098         mutex_unlock(&mdsc->mutex);
1099 }
1100
1101 /*
1102  * Helper to safely iterate over all caps associated with a session, with
1103  * special care taken to handle a racing __ceph_remove_cap().
1104  *
1105  * Caller must hold session s_mutex.
1106  */
1107 static int iterate_session_caps(struct ceph_mds_session *session,
1108                                  int (*cb)(struct inode *, struct ceph_cap *,
1109                                             void *), void *arg)
1110 {
1111         struct list_head *p;
1112         struct ceph_cap *cap;
1113         struct inode *inode, *last_inode = NULL;
1114         struct ceph_cap *old_cap = NULL;
1115         int ret;
1116
1117         dout("iterate_session_caps %p mds%d\n", session, session->s_mds);
1118         spin_lock(&session->s_cap_lock);
1119         p = session->s_caps.next;
1120         while (p != &session->s_caps) {
1121                 cap = list_entry(p, struct ceph_cap, session_caps);
1122                 inode = igrab(&cap->ci->vfs_inode);
1123                 if (!inode) {
1124                         p = p->next;
1125                         continue;
1126                 }
1127                 session->s_cap_iterator = cap;
1128                 spin_unlock(&session->s_cap_lock);
1129
1130                 if (last_inode) {
1131                         iput(last_inode);
1132                         last_inode = NULL;
1133                 }
1134                 if (old_cap) {
1135                         ceph_put_cap(session->s_mdsc, old_cap);
1136                         old_cap = NULL;
1137                 }
1138
1139                 ret = cb(inode, cap, arg);
1140                 last_inode = inode;
1141
1142                 spin_lock(&session->s_cap_lock);
1143                 p = p->next;
1144                 if (!cap->ci) {
1145                         dout("iterate_session_caps  finishing cap %p removal\n",
1146                              cap);
1147                         BUG_ON(cap->session != session);
1148                         cap->session = NULL;
1149                         list_del_init(&cap->session_caps);
1150                         session->s_nr_caps--;
1151                         if (cap->queue_release) {
1152                                 list_add_tail(&cap->session_caps,
1153                                               &session->s_cap_releases);
1154                                 session->s_num_cap_releases++;
1155                         } else {
1156                                 old_cap = cap;  /* put_cap it w/o locks held */
1157                         }
1158                 }
1159                 if (ret < 0)
1160                         goto out;
1161         }
1162         ret = 0;
1163 out:
1164         session->s_cap_iterator = NULL;
1165         spin_unlock(&session->s_cap_lock);
1166
1167         iput(last_inode);
1168         if (old_cap)
1169                 ceph_put_cap(session->s_mdsc, old_cap);
1170
1171         return ret;
1172 }
1173
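/*
 * Per-cap callback used when tearing down a session: remove the cap
 * and, if it was the last one, discard any dirty/flushing state so the
 * inode can be released.
 */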
1174 static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
1175                                   void *arg)
1176 {
1177         struct ceph_fs_client *fsc = (struct ceph_fs_client *)arg;
1178         struct ceph_inode_info *ci = ceph_inode(inode);
1179         LIST_HEAD(to_remove);
1180         bool drop = false;
1181         bool invalidate = false;
1182
1183         dout("removing cap %p, ci is %p, inode is %p\n",
1184              cap, ci, &ci->vfs_inode);
1185         spin_lock(&ci->i_ceph_lock);
1186         __ceph_remove_cap(cap, false);
1187         if (!ci->i_auth_cap) {
1188                 struct ceph_cap_flush *cf;
1189                 struct ceph_mds_client *mdsc = fsc->mdsc;
1190
1191                 ci->i_ceph_flags |= CEPH_I_CAP_DROPPED;
1192
1193                 if (ci->i_wrbuffer_ref > 0 &&
1194                     READ_ONCE(fsc->mount_state) == CEPH_MOUNT_SHUTDOWN)
1195                         invalidate = true;
1196
1197                 while (!list_empty(&ci->i_cap_flush_list)) {
1198                         cf = list_first_entry(&ci->i_cap_flush_list,
1199                                               struct ceph_cap_flush, i_list);
1200                         list_move(&cf->i_list, &to_remove);
1201                 }
1202
1203                 spin_lock(&mdsc->cap_dirty_lock);
1204
1205                 list_for_each_entry(cf, &to_remove, i_list)
1206                         list_del(&cf->g_list);
1207
1208                 if (!list_empty(&ci->i_dirty_item)) {
1209                         pr_warn_ratelimited(
1210                                 " dropping dirty %s state for %p %lld\n",
1211                                 ceph_cap_string(ci->i_dirty_caps),
1212                                 inode, ceph_ino(inode));
1213                         ci->i_dirty_caps = 0;
1214                         list_del_init(&ci->i_dirty_item);
1215                         drop = true;
1216                 }
1217                 if (!list_empty(&ci->i_flushing_item)) {
1218                         pr_warn_ratelimited(
1219                                 " dropping dirty+flushing %s state for %p %lld\n",
1220                                 ceph_cap_string(ci->i_flushing_caps),
1221                                 inode, ceph_ino(inode));
1222                         ci->i_flushing_caps = 0;
1223                         list_del_init(&ci->i_flushing_item);
1224                         mdsc->num_cap_flushing--;
1225                         drop = true;
1226                 }
1227                 spin_unlock(&mdsc->cap_dirty_lock);
1228
1229                 if (atomic_read(&ci->i_filelock_ref) > 0) {
1230                         /* make further file lock syscall return -EIO */
1231                         ci->i_ceph_flags |= CEPH_I_ERROR_FILELOCK;
1232                         pr_warn_ratelimited(" dropping file locks for %p %lld\n",
1233                                             inode, ceph_ino(inode));
1234                 }
1235
1236                 if (!ci->i_dirty_caps && ci->i_prealloc_cap_flush) {
1237                         list_add(&ci->i_prealloc_cap_flush->i_list, &to_remove);
1238                         ci->i_prealloc_cap_flush = NULL;
1239                 }
1240         }
1241         spin_unlock(&ci->i_ceph_lock);
1242         while (!list_empty(&to_remove)) {
1243                 struct ceph_cap_flush *cf;
1244                 cf = list_first_entry(&to_remove,
1245                                       struct ceph_cap_flush, i_list);
1246                 list_del(&cf->i_list);
1247                 ceph_free_cap_flush(cf);
1248         }
1249
1250         wake_up_all(&ci->i_cap_wq);
1251         if (invalidate)
1252                 ceph_queue_invalidate(inode);
1253         if (drop)
1254                 iput(inode);
1255         return 0;
1256 }
1257
1258 /*
1259  * caller must hold session s_mutex
1260  */
1261 static void remove_session_caps(struct ceph_mds_session *session)
1262 {
1263         struct ceph_fs_client *fsc = session->s_mdsc->fsc;
1264         struct super_block *sb = fsc->sb;
1265         LIST_HEAD(dispose);
1266
1267         dout("remove_session_caps on %p\n", session);
1268         iterate_session_caps(session, remove_session_caps_cb, fsc);
1269
1270         wake_up_all(&fsc->mdsc->cap_flushing_wq);
1271
1272         spin_lock(&session->s_cap_lock);
1273         if (session->s_nr_caps > 0) {
1274                 struct inode *inode;
1275                 struct ceph_cap *cap, *prev = NULL;
1276                 struct ceph_vino vino;
1277                 /*
1278                  * iterate_session_caps() skips inodes that are being
1279                  * deleted; we need to wait until deletions are complete.
1280                  * __wait_on_freeing_inode() is designed for the job,
1281                  * but it is not exported, so use lookup inode function
1282                  * to access it.
1283                  */
1284                 while (!list_empty(&session->s_caps)) {
1285                         cap = list_entry(session->s_caps.next,
1286                                          struct ceph_cap, session_caps);
1287                         if (cap == prev)
1288                                 break;
1289                         prev = cap;
1290                         vino = cap->ci->i_vino;
1291                         spin_unlock(&session->s_cap_lock);
1292
1293                         inode = ceph_find_inode(sb, vino);
1294                         iput(inode);
1295
1296                         spin_lock(&session->s_cap_lock);
1297                 }
1298         }
1299
1300         // detach queued cap releases; they are disposed of after s_cap_lock is dropped
1301         detach_cap_releases(session, &dispose);
1302
1303         BUG_ON(session->s_nr_caps > 0);
1304         BUG_ON(!list_empty(&session->s_cap_flushing));
1305         spin_unlock(&session->s_cap_lock);
1306         dispose_cap_releases(session->s_mdsc, &dispose);
1307 }
1308
1309 /*
1310  * wake up any threads waiting on this session's caps.  if the cap is
1311  * old (didn't get renewed on the client reconnect), remove it now.
1312  *
1313  * caller must hold s_mutex.
1314  */
1315 static int wake_up_session_cb(struct inode *inode, struct ceph_cap *cap,
1316                               void *arg)
1317 {
1318         struct ceph_inode_info *ci = ceph_inode(inode);
1319
1320         if (arg) {
1321                 spin_lock(&ci->i_ceph_lock);
1322                 ci->i_wanted_max_size = 0;
1323                 ci->i_requested_max_size = 0;
1324                 spin_unlock(&ci->i_ceph_lock);
1325         }
1326         wake_up_all(&ci->i_cap_wq);
1327         return 0;
1328 }
1329
1330 static void wake_up_session_caps(struct ceph_mds_session *session,
1331                                  int reconnect)
1332 {
1333         dout("wake_up_session_caps %p mds%d\n", session, session->s_mds);
1334         iterate_session_caps(session, wake_up_session_cb,
1335                              (void *)(unsigned long)reconnect);
1336 }
1337
1338 /*
1339  * Send periodic message to MDS renewing all currently held caps.  The
1340  * ack will reset the expiration for all caps from this session.
1341  *
1342  * caller holds s_mutex
1343  */
1344 static int send_renew_caps(struct ceph_mds_client *mdsc,
1345                            struct ceph_mds_session *session)
1346 {
1347         struct ceph_msg *msg;
1348         int state;
1349
1350         if (time_after_eq(jiffies, session->s_cap_ttl) &&
1351             time_after_eq(session->s_cap_ttl, session->s_renew_requested))
1352                 pr_info("mds%d caps stale\n", session->s_mds);
1353         session->s_renew_requested = jiffies;
1354
1355         /* do not try to renew caps until a recovering mds has reconnected
1356          * with its clients. */
1357         state = ceph_mdsmap_get_state(mdsc->mdsmap, session->s_mds);
1358         if (state < CEPH_MDS_STATE_RECONNECT) {
1359                 dout("send_renew_caps ignoring mds%d (%s)\n",
1360                      session->s_mds, ceph_mds_state_name(state));
1361                 return 0;
1362         }
1363
1364         dout("send_renew_caps to mds%d (%s)\n", session->s_mds,
1365                 ceph_mds_state_name(state));
1366         msg = create_session_msg(CEPH_SESSION_REQUEST_RENEWCAPS,
1367                                  ++session->s_renew_seq);
1368         if (!msg)
1369                 return -ENOMEM;
1370         ceph_con_send(&session->s_con, msg);
1371         return 0;
1372 }
1373
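/*
 * Acknowledge a CEPH_SESSION_FLUSHMSG request from the MDS.
 */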
1374 static int send_flushmsg_ack(struct ceph_mds_client *mdsc,
1375                              struct ceph_mds_session *session, u64 seq)
1376 {
1377         struct ceph_msg *msg;
1378
1379         dout("send_flushmsg_ack to mds%d (%s) seq %lld\n",
1380              session->s_mds, ceph_session_state_name(session->s_state), seq);
1381         msg = create_session_msg(CEPH_SESSION_FLUSHMSG_ACK, seq);
1382         if (!msg)
1383                 return -ENOMEM;
1384         ceph_con_send(&session->s_con, msg);
1385         return 0;
1386 }
1387
1388
1389 /*
1390  * Note new cap ttl, and any transition from stale -> not stale (fresh?).
1391  *
1392  * Called under session->s_mutex
1393  */
1394 static void renewed_caps(struct ceph_mds_client *mdsc,
1395                          struct ceph_mds_session *session, int is_renew)
1396 {
1397         int was_stale;
1398         int wake = 0;
1399
1400         spin_lock(&session->s_cap_lock);
1401         was_stale = is_renew && time_after_eq(jiffies, session->s_cap_ttl);
1402
1403         session->s_cap_ttl = session->s_renew_requested +
1404                 mdsc->mdsmap->m_session_timeout*HZ;
1405
1406         if (was_stale) {
1407                 if (time_before(jiffies, session->s_cap_ttl)) {
1408                         pr_info("mds%d caps renewed\n", session->s_mds);
1409                         wake = 1;
1410                 } else {
1411                         pr_info("mds%d caps still stale\n", session->s_mds);
1412                 }
1413         }
1414         dout("renewed_caps mds%d ttl now %lu, was %s, now %s\n",
1415              session->s_mds, session->s_cap_ttl, was_stale ? "stale" : "fresh",
1416              time_before(jiffies, session->s_cap_ttl) ? "fresh" : "stale");
1417         spin_unlock(&session->s_cap_lock);
1418
1419         if (wake)
1420                 wake_up_session_caps(session, 0);
1421 }
1422
1423 /*
1424  * send a session close request
1425  */
1426 static int request_close_session(struct ceph_mds_client *mdsc,
1427                                  struct ceph_mds_session *session)
1428 {
1429         struct ceph_msg *msg;
1430
1431         dout("request_close_session mds%d state %s seq %lld\n",
1432              session->s_mds, ceph_session_state_name(session->s_state),
1433              session->s_seq);
1434         msg = create_session_msg(CEPH_SESSION_REQUEST_CLOSE, session->s_seq);
1435         if (!msg)
1436                 return -ENOMEM;
1437         ceph_con_send(&session->s_con, msg);
1438         return 1;
1439 }
1440
1441 /*
1442  * Called with s_mutex held.
1443  */
1444 static int __close_session(struct ceph_mds_client *mdsc,
1445                          struct ceph_mds_session *session)
1446 {
1447         if (session->s_state >= CEPH_MDS_SESSION_CLOSING)
1448                 return 0;
1449         session->s_state = CEPH_MDS_SESSION_CLOSING;
1450         return request_close_session(mdsc, session);
1451 }
1452
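/*
 * Prune a dentry's children if they are all negative, so the caller can
 * try to drop the dentry itself.  Returns true when there are no
 * positive children (trivially so for non-directories).
 */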
1453 static bool drop_negative_children(struct dentry *dentry)
1454 {
1455         struct dentry *child;
1456         bool all_negative = true;
1457
1458         if (!d_is_dir(dentry))
1459                 goto out;
1460
1461         spin_lock(&dentry->d_lock);
1462         list_for_each_entry(child, &dentry->d_subdirs, d_child) {
1463                 if (d_really_is_positive(child)) {
1464                         all_negative = false;
1465                         break;
1466                 }
1467         }
1468         spin_unlock(&dentry->d_lock);
1469
1470         if (all_negative)
1471                 shrink_dcache_parent(dentry);
1472 out:
1473         return all_negative;
1474 }
1475
1476 /*
1477  * Trim old(er) caps.
1478  *
1479  * Because we can't cache an inode without one or more caps, we do
1480  * this indirectly: if a cap is unused, we prune its aliases, at which
1481  * point the inode will hopefully get dropped to.
1482  *
1483  * Yes, this is a bit sloppy.  Our only real goal here is to respond to
1484  * memory pressure from the MDS, though, so it needn't be perfect.
1485  */
1486 static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
1487 {
1488         struct ceph_mds_session *session = arg;
1489         struct ceph_inode_info *ci = ceph_inode(inode);
1490         int used, wanted, oissued, mine;
1491
1492         if (session->s_trim_caps <= 0)
1493                 return -1;
1494
1495         spin_lock(&ci->i_ceph_lock);
1496         mine = cap->issued | cap->implemented;
1497         used = __ceph_caps_used(ci);
1498         wanted = __ceph_caps_file_wanted(ci);
1499         oissued = __ceph_caps_issued_other(ci, cap);
1500
1501         dout("trim_caps_cb %p cap %p mine %s oissued %s used %s wanted %s\n",
1502              inode, cap, ceph_cap_string(mine), ceph_cap_string(oissued),
1503              ceph_cap_string(used), ceph_cap_string(wanted));
1504         if (cap == ci->i_auth_cap) {
1505                 if (ci->i_dirty_caps || ci->i_flushing_caps ||
1506                     !list_empty(&ci->i_cap_snaps))
1507                         goto out;
1508                 if ((used | wanted) & CEPH_CAP_ANY_WR)
1509                         goto out;
1510                 /* Note: it's possible that i_filelock_ref becomes non-zero
1511                  * after dropping auth caps. It doesn't hurt because the reply
1512                  * to the lock mds request will re-add auth caps. */
1513                 if (atomic_read(&ci->i_filelock_ref) > 0)
1514                         goto out;
1515         }
1516         /* The inode has cached pages, but it's no longer used.
1517          * We can safely drop it. */
1518         if (wanted == 0 && used == CEPH_CAP_FILE_CACHE &&
1519             !(oissued & CEPH_CAP_FILE_CACHE)) {
1520                 used = 0;
1521                 oissued = 0;
1522         }
1523         if ((used | wanted) & ~oissued & mine)
1524                 goto out;   /* we need these caps */
1525
1526         if (oissued) {
1527                 /* we aren't the only cap.. just remove us */
1528                 __ceph_remove_cap(cap, true);
1529                 session->s_trim_caps--;
1530         } else {
1531                 struct dentry *dentry;
1532                 /* try dropping referring dentries */
1533                 spin_unlock(&ci->i_ceph_lock);
1534                 dentry = d_find_any_alias(inode);
1535                 if (dentry && drop_negative_children(dentry)) {
1536                         int count;
1537                         dput(dentry);
1538                         d_prune_aliases(inode);
1539                         count = atomic_read(&inode->i_count);
1540                         if (count == 1)
1541                                 session->s_trim_caps--;
1542                         dout("trim_caps_cb %p cap %p pruned, count now %d\n",
1543                              inode, cap, count);
1544                 } else {
1545                         dput(dentry);
1546                 }
1547                 return 0;
1548         }
1549
1550 out:
1551         spin_unlock(&ci->i_ceph_lock);
1552         return 0;
1553 }
1554
1555 /*
1556  * Trim session cap count down to some max number.
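 * Walks the session's caps with trim_caps_cb() until the s_trim_caps
 * budget is used up, then pushes any queued cap releases to the MDS.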
1557  */
1558 int ceph_trim_caps(struct ceph_mds_client *mdsc,
1559                    struct ceph_mds_session *session,
1560                    int max_caps)
1561 {
1562         int trim_caps = session->s_nr_caps - max_caps;
1563
1564         dout("trim_caps mds%d start: %d / %d, trim %d\n",
1565              session->s_mds, session->s_nr_caps, max_caps, trim_caps);
1566         if (trim_caps > 0) {
1567                 session->s_trim_caps = trim_caps;
1568                 iterate_session_caps(session, trim_caps_cb, session);
1569                 dout("trim_caps mds%d done: %d / %d, trimmed %d\n",
1570                      session->s_mds, session->s_nr_caps, max_caps,
1571                         trim_caps - session->s_trim_caps);
1572                 session->s_trim_caps = 0;
1573         }
1574
1575         ceph_send_cap_releases(mdsc, session);
1576         return 0;
1577 }
1578
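/*
 * Helper for wait_caps_flush(): returns nonzero once no cap flush with
 * tid <= want_flush_tid remains pending on mdsc->cap_flush_list.
 */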
1579 static int check_caps_flush(struct ceph_mds_client *mdsc,
1580                             u64 want_flush_tid)
1581 {
1582         int ret = 1;
1583
1584         spin_lock(&mdsc->cap_dirty_lock);
1585         if (!list_empty(&mdsc->cap_flush_list)) {
1586                 struct ceph_cap_flush *cf =
1587                         list_first_entry(&mdsc->cap_flush_list,
1588                                          struct ceph_cap_flush, g_list);
1589                 if (cf->tid <= want_flush_tid) {
1590                         dout("check_caps_flush still flushing tid "
1591                              "%llu <= %llu\n", cf->tid, want_flush_tid);
1592                         ret = 0;
1593                 }
1594         }
1595         spin_unlock(&mdsc->cap_dirty_lock);
1596         return ret;
1597 }
1598
1599 /*
1600  * flush all dirty inode data to disk.
1601  *
1602  * wait until we've flushed through want_flush_tid
1603  */
1604 static void wait_caps_flush(struct ceph_mds_client *mdsc,
1605                             u64 want_flush_tid)
1606 {
1607         dout("check_caps_flush want %llu\n", want_flush_tid);
1608
1609         wait_event(mdsc->cap_flushing_wq,
1610                    check_caps_flush(mdsc, want_flush_tid));
1611
1612         dout("check_caps_flush ok, flushed thru %llu\n", want_flush_tid);
1613 }
1614
1615 /*
1616  * called under s_mutex
1617  */
1618 void ceph_send_cap_releases(struct ceph_mds_client *mdsc,
1619                             struct ceph_mds_session *session)
1620 {
1621         struct ceph_msg *msg = NULL;
1622         struct ceph_mds_cap_release *head;
1623         struct ceph_mds_cap_item *item;
1624         struct ceph_osd_client *osdc = &mdsc->fsc->client->osdc;
1625         struct ceph_cap *cap;
1626         LIST_HEAD(tmp_list);
1627         int num_cap_releases;
1628         __le32  barrier, *cap_barrier;
1629
1630         down_read(&osdc->lock);
1631         barrier = cpu_to_le32(osdc->epoch_barrier);
1632         up_read(&osdc->lock);
1633
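        /*
         * Drain s_cap_releases in batches: each CEPH_MSG_CLIENT_CAPRELEASE
         * message carries up to CEPH_CAPS_PER_RELEASE items followed by the
         * osd epoch barrier snapshotted above.
         */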
1634         spin_lock(&session->s_cap_lock);
1635 again:
1636         list_splice_init(&session->s_cap_releases, &tmp_list);
1637         num_cap_releases = session->s_num_cap_releases;
1638         session->s_num_cap_releases = 0;
1639         spin_unlock(&session->s_cap_lock);
1640
1641         while (!list_empty(&tmp_list)) {
1642                 if (!msg) {
1643                         msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPRELEASE,
1644                                         PAGE_SIZE, GFP_NOFS, false);
1645                         if (!msg)
1646                                 goto out_err;
1647                         head = msg->front.iov_base;
1648                         head->num = cpu_to_le32(0);
1649                         msg->front.iov_len = sizeof(*head);
1650
1651                         msg->hdr.version = cpu_to_le16(2);
1652                         msg->hdr.compat_version = cpu_to_le16(1);
1653                 }
1654
1655                 cap = list_first_entry(&tmp_list, struct ceph_cap,
1656                                         session_caps);
1657                 list_del(&cap->session_caps);
1658                 num_cap_releases--;
1659
1660                 head = msg->front.iov_base;
1661                 le32_add_cpu(&head->num, 1);
1662                 item = msg->front.iov_base + msg->front.iov_len;
1663                 item->ino = cpu_to_le64(cap->cap_ino);
1664                 item->cap_id = cpu_to_le64(cap->cap_id);
1665                 item->migrate_seq = cpu_to_le32(cap->mseq);
1666                 item->seq = cpu_to_le32(cap->issue_seq);
1667                 msg->front.iov_len += sizeof(*item);
1668
1669                 ceph_put_cap(mdsc, cap);
1670
1671                 if (le32_to_cpu(head->num) == CEPH_CAPS_PER_RELEASE) {
1672                         // Append cap_barrier field
1673                         cap_barrier = msg->front.iov_base + msg->front.iov_len;
1674                         *cap_barrier = barrier;
1675                         msg->front.iov_len += sizeof(*cap_barrier);
1676
1677                         msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
1678                         dout("send_cap_releases mds%d %p\n", session->s_mds, msg);
1679                         ceph_con_send(&session->s_con, msg);
1680                         msg = NULL;
1681                 }
1682         }
1683
1684         BUG_ON(num_cap_releases != 0);
1685
1686         spin_lock(&session->s_cap_lock);
1687         if (!list_empty(&session->s_cap_releases))
1688                 goto again;
1689         spin_unlock(&session->s_cap_lock);
1690
1691         if (msg) {
1692                 // Append cap_barrier field
1693                 cap_barrier = msg->front.iov_base + msg->front.iov_len;
1694                 *cap_barrier = barrier;
1695                 msg->front.iov_len += sizeof(*cap_barrier);
1696
1697                 msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
1698                 dout("send_cap_releases mds%d %p\n", session->s_mds, msg);
1699                 ceph_con_send(&session->s_con, msg);
1700         }
1701         return;
1702 out_err:
1703         pr_err("send_cap_releases mds%d, failed to allocate message\n",
1704                 session->s_mds);
1705         spin_lock(&session->s_cap_lock);
1706         list_splice(&tmp_list, &session->s_cap_releases);
1707         session->s_num_cap_releases += num_cap_releases;
1708         spin_unlock(&session->s_cap_lock);
1709 }
1710
1711 /*
1712  * requests
1713  */
1714
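/*
 * Size the readdir reply buffer from the directory's i_files + i_subdirs
 * estimate, clamped to [1, max_readdir] entries.  If the allocation fails,
 * fall back to smaller page orders, then recompute how many entries fit
 * and ask the MDS for at most that many.
 */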
1715 int ceph_alloc_readdir_reply_buffer(struct ceph_mds_request *req,
1716                                     struct inode *dir)
1717 {
1718         struct ceph_inode_info *ci = ceph_inode(dir);
1719         struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
1720         struct ceph_mount_options *opt = req->r_mdsc->fsc->mount_options;
1721         size_t size = sizeof(struct ceph_mds_reply_dir_entry);
1722         int order, num_entries;
1723
1724         spin_lock(&ci->i_ceph_lock);
1725         num_entries = ci->i_files + ci->i_subdirs;
1726         spin_unlock(&ci->i_ceph_lock);
1727         num_entries = max(num_entries, 1);
1728         num_entries = min(num_entries, opt->max_readdir);
1729
1730         order = get_order(size * num_entries);
1731         while (order >= 0) {
1732                 rinfo->dir_entries = (void*)__get_free_pages(GFP_KERNEL |
1733                                                              __GFP_NOWARN,
1734                                                              order);
1735                 if (rinfo->dir_entries)
1736                         break;
1737                 order--;
1738         }
1739         if (!rinfo->dir_entries)
1740                 return -ENOMEM;
1741
1742         num_entries = (PAGE_SIZE << order) / size;
1743         num_entries = min(num_entries, opt->max_readdir);
1744
1745         rinfo->dir_buf_size = PAGE_SIZE << order;
1746         req->r_num_caps = num_entries + 1;
1747         req->r_args.readdir.max_entries = cpu_to_le32(num_entries);
1748         req->r_args.readdir.max_bytes = cpu_to_le32(opt->max_readdir_bytes);
1749         return 0;
1750 }
1751
1752 /*
1753  * Create an mds request.
1754  */
1755 struct ceph_mds_request *
1756 ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode)
1757 {
1758         struct ceph_mds_request *req = kzalloc(sizeof(*req), GFP_NOFS);
1759
1760         if (!req)
1761                 return ERR_PTR(-ENOMEM);
1762
1763         mutex_init(&req->r_fill_mutex);
1764         req->r_mdsc = mdsc;
1765         req->r_started = jiffies;
1766         req->r_resend_mds = -1;
1767         INIT_LIST_HEAD(&req->r_unsafe_dir_item);
1768         INIT_LIST_HEAD(&req->r_unsafe_target_item);
1769         req->r_fmode = -1;
1770         kref_init(&req->r_kref);
1771         RB_CLEAR_NODE(&req->r_node);
1772         INIT_LIST_HEAD(&req->r_wait);
1773         init_completion(&req->r_completion);
1774         init_completion(&req->r_safe_completion);
1775         INIT_LIST_HEAD(&req->r_unsafe_item);
1776
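        /* stamp the request with the current time, truncated to the
         * filesystem's timestamp granularity */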
1777         req->r_stamp = timespec_trunc(current_kernel_time(), mdsc->fsc->sb->s_time_gran);
1778
1779         req->r_op = op;
1780         req->r_direct_mode = mode;
1781         return req;
1782 }
1783
1784 /*
1785  * return oldest (lowest) request, tid in request tree, 0 if none.
1786  *
1787  * called under mdsc->mutex.
1788  */
1789 static struct ceph_mds_request *__get_oldest_req(struct ceph_mds_client *mdsc)
1790 {
1791         if (RB_EMPTY_ROOT(&mdsc->request_tree))
1792                 return NULL;
1793         return rb_entry(rb_first(&mdsc->request_tree),
1794                         struct ceph_mds_request, r_node);
1795 }
1796
1797 static inline  u64 __get_oldest_tid(struct ceph_mds_client *mdsc)
1798 {
1799         return mdsc->oldest_tid;
1800 }
1801
1802 /*
1803  * Build a dentry's path.  Allocate on heap; caller must kfree.  Based
1804  * on build_path_from_dentry in fs/cifs/dir.c.
1805  *
1806  * If @stop_on_nosnap, generate path relative to the first non-snapped
1807  * inode.
1808  *
1809  * Encode hidden .snap dirs as a double /, i.e.
1810  *   foo/.snap/bar -> foo//bar
1811  */
1812 char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *base,
1813                            int stop_on_nosnap)
1814 {
1815         struct dentry *temp;
1816         char *path;
1817         int len, pos;
1818         unsigned seq;
1819
1820         if (!dentry)
1821                 return ERR_PTR(-EINVAL);
1822
1823 retry:
1824         len = 0;
1825         seq = read_seqbegin(&rename_lock);
1826         rcu_read_lock();
1827         for (temp = dentry; !IS_ROOT(temp);) {
1828                 struct inode *inode = d_inode(temp);
1829                 if (inode && ceph_snap(inode) == CEPH_SNAPDIR)
1830                         len++;  /* slash only */
1831                 else if (stop_on_nosnap && inode &&
1832                          ceph_snap(inode) == CEPH_NOSNAP)
1833                         break;
1834                 else
1835                         len += 1 + temp->d_name.len;
1836                 temp = temp->d_parent;
1837         }
1838         rcu_read_unlock();
1839         if (len)
1840                 len--;  /* no leading '/' */
1841
1842         path = kmalloc(len+1, GFP_NOFS);
1843         if (!path)
1844                 return ERR_PTR(-ENOMEM);
1845         pos = len;
1846         path[pos] = 0;  /* trailing null */
1847         rcu_read_lock();
1848         for (temp = dentry; !IS_ROOT(temp) && pos != 0; ) {
1849                 struct inode *inode;
1850
1851                 spin_lock(&temp->d_lock);
1852                 inode = d_inode(temp);
1853                 if (inode && ceph_snap(inode) == CEPH_SNAPDIR) {
1854                         dout("build_path path+%d: %p SNAPDIR\n",
1855                              pos, temp);
1856                 } else if (stop_on_nosnap && inode &&
1857                            ceph_snap(inode) == CEPH_NOSNAP) {
1858                         spin_unlock(&temp->d_lock);
1859                         break;
1860                 } else {
1861                         pos -= temp->d_name.len;
1862                         if (pos < 0) {
1863                                 spin_unlock(&temp->d_lock);
1864                                 break;
1865                         }
1866                         strncpy(path + pos, temp->d_name.name,
1867                                 temp->d_name.len);
1868                 }
1869                 spin_unlock(&temp->d_lock);
1870                 if (pos)
1871                         path[--pos] = '/';
1872                 temp = temp->d_parent;
1873         }
1874         rcu_read_unlock();
1875         if (pos != 0 || read_seqretry(&rename_lock, seq)) {
1876                 pr_err("build_path did not end path lookup where "
1877                        "expected, namelen is %d, pos is %d\n", len, pos);
1878                 /* presumably this is only possible if racing with a
1879                    rename of one of the parent directories (we can not
1880                    lock the dentries above us to prevent this, but
1881                    retrying should be harmless) */
1882                 kfree(path);
1883                 goto retry;
1884         }
1885
1886         *base = ceph_ino(d_inode(temp));
1887         *plen = len;
1888         dout("build_path on %p %d built %llx '%.*s'\n",
1889              dentry, d_count(dentry), *base, len, path);
1890         return path;
1891 }
1892
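/*
 * If the parent directory is not snapped, the request can reference it by
 * ino and carry just the dentry name; otherwise fall back to building a
 * full path with ceph_mdsc_build_path().
 */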
1893 static int build_dentry_path(struct dentry *dentry, struct inode *dir,
1894                              const char **ppath, int *ppathlen, u64 *pino,
1895                              int *pfreepath)
1896 {
1897         char *path;
1898
1899         rcu_read_lock();
1900         if (!dir)
1901                 dir = d_inode_rcu(dentry->d_parent);
1902         if (dir && ceph_snap(dir) == CEPH_NOSNAP) {
1903                 *pino = ceph_ino(dir);
1904                 rcu_read_unlock();
1905                 *ppath = dentry->d_name.name;
1906                 *ppathlen = dentry->d_name.len;
1907                 return 0;
1908         }
1909         rcu_read_unlock();
1910         path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
1911         if (IS_ERR(path))
1912                 return PTR_ERR(path);
1913         *ppath = path;
1914         *pfreepath = 1;
1915         return 0;
1916 }
1917
1918 static int build_inode_path(struct inode *inode,
1919                             const char **ppath, int *ppathlen, u64 *pino,
1920                             int *pfreepath)
1921 {
1922         struct dentry *dentry;
1923         char *path;
1924
1925         if (ceph_snap(inode) == CEPH_NOSNAP) {
1926                 *pino = ceph_ino(inode);
1927                 *ppathlen = 0;
1928                 return 0;
1929         }
1930         dentry = d_find_alias(inode);
1931         path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
1932         dput(dentry);
1933         if (IS_ERR(path))
1934                 return PTR_ERR(path);
1935         *ppath = path;
1936         *pfreepath = 1;
1937         return 0;
1938 }
1939
1940 /*
1941  * request arguments may be specified via an inode *, a dentry *, or
1942  * an explicit ino+path.
1943  */
1944 static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry,
1945                                   struct inode *rdiri, const char *rpath,
1946                                   u64 rino, const char **ppath, int *pathlen,
1947                                   u64 *ino, int *freepath)
1948 {
1949         int r = 0;
1950
1951         if (rinode) {
1952                 r = build_inode_path(rinode, ppath, pathlen, ino, freepath);
1953                 dout(" inode %p %llx.%llx\n", rinode, ceph_ino(rinode),
1954                      ceph_snap(rinode));
1955         } else if (rdentry) {
1956                 r = build_dentry_path(rdentry, rdiri, ppath, pathlen, ino,
1957                                         freepath);
1958                 dout(" dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen,
1959                      *ppath);
1960         } else if (rpath || rino) {
1961                 *ino = rino;
1962                 *ppath = rpath;
1963                 *pathlen = rpath ? strlen(rpath) : 0;
1964                 dout(" path %.*s\n", *pathlen, rpath);
1965         }
1966
1967         return r;
1968 }
1969
1970 /*
1971  * called under mdsc->mutex
1972  */
1973 static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
1974                                                struct ceph_mds_request *req,
1975                                                int mds, bool drop_cap_releases)
1976 {
1977         struct ceph_msg *msg;
1978         struct ceph_mds_request_head *head;
1979         const char *path1 = NULL;
1980         const char *path2 = NULL;
1981         u64 ino1 = 0, ino2 = 0;
1982         int pathlen1 = 0, pathlen2 = 0;
1983         int freepath1 = 0, freepath2 = 0;
1984         int len;
1985         u16 releases;
1986         void *p, *end;
1987         int ret;
1988
1989         ret = set_request_path_attr(req->r_inode, req->r_dentry,
1990                               req->r_parent, req->r_path1, req->r_ino1.ino,
1991                               &path1, &pathlen1, &ino1, &freepath1);
1992         if (ret < 0) {
1993                 msg = ERR_PTR(ret);
1994                 goto out;
1995         }
1996
1997         ret = set_request_path_attr(NULL, req->r_old_dentry,
1998                               req->r_old_dentry_dir,
1999                               req->r_path2, req->r_ino2.ino,
2000                               &path2, &pathlen2, &ino2, &freepath2);
2001         if (ret < 0) {
2002                 msg = ERR_PTR(ret);
2003                 goto out_free1;
2004         }
2005
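        /*
         * Front section length: the fixed request head, two encoded
         * filepaths (each expected to be a one-byte version, a u64 ino and
         * a u32 length plus the path bytes, hence the
         * 2*(1 + sizeof(u32) + sizeof(u64)) term) and a ceph_timespec stamp.
         */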
2006         len = sizeof(*head) +
2007                 pathlen1 + pathlen2 + 2*(1 + sizeof(u32) + sizeof(u64)) +
2008                 sizeof(struct ceph_timespec);
2009
2010         /* calculate (max) length for cap releases */
2011         len += sizeof(struct ceph_mds_request_release) *
2012                 (!!req->r_inode_drop + !!req->r_dentry_drop +
2013                  !!req->r_old_inode_drop + !!req->r_old_dentry_drop);
2014         if (req->r_dentry_drop)
2015                 len += req->r_dentry->d_name.len;
2016         if (req->r_old_dentry_drop)
2017                 len += req->r_old_dentry->d_name.len;
2018
2019         msg = ceph_msg_new(CEPH_MSG_CLIENT_REQUEST, len, GFP_NOFS, false);
2020         if (!msg) {
2021                 msg = ERR_PTR(-ENOMEM);
2022                 goto out_free2;
2023         }
2024
2025         msg->hdr.version = cpu_to_le16(2);
2026         msg->hdr.tid = cpu_to_le64(req->r_tid);
2027
2028         head = msg->front.iov_base;
2029         p = msg->front.iov_base + sizeof(*head);
2030         end = msg->front.iov_base + msg->front.iov_len;
2031
2032         head->mdsmap_epoch = cpu_to_le32(mdsc->mdsmap->m_epoch);
2033         head->op = cpu_to_le32(req->r_op);
2034         head->caller_uid = cpu_to_le32(from_kuid(&init_user_ns, req->r_uid));
2035         head->caller_gid = cpu_to_le32(from_kgid(&init_user_ns, req->r_gid));
2036         head->args = req->r_args;
2037
2038         ceph_encode_filepath(&p, end, ino1, path1);
2039         ceph_encode_filepath(&p, end, ino2, path2);
2040
2041         /* make note of release offset, in case we need to replay */
2042         req->r_request_release_offset = p - msg->front.iov_base;
2043
2044         /* cap releases */
2045         releases = 0;
2046         if (req->r_inode_drop)
2047                 releases += ceph_encode_inode_release(&p,
2048                       req->r_inode ? req->r_inode : d_inode(req->r_dentry),
2049                       mds, req->r_inode_drop, req->r_inode_unless, 0);
2050         if (req->r_dentry_drop)
2051                 releases += ceph_encode_dentry_release(&p, req->r_dentry,
2052                                 req->r_parent, mds, req->r_dentry_drop,
2053                                 req->r_dentry_unless);
2054         if (req->r_old_dentry_drop)
2055                 releases += ceph_encode_dentry_release(&p, req->r_old_dentry,
2056                                 req->r_old_dentry_dir, mds,
2057                                 req->r_old_dentry_drop,
2058                                 req->r_old_dentry_unless);
2059         if (req->r_old_inode_drop)
2060                 releases += ceph_encode_inode_release(&p,
2061                       d_inode(req->r_old_dentry),
2062                       mds, req->r_old_inode_drop, req->r_old_inode_unless, 0);
2063
2064         if (drop_cap_releases) {
2065                 releases = 0;
2066                 p = msg->front.iov_base + req->r_request_release_offset;
2067         }
2068
2069         head->num_releases = cpu_to_le16(releases);
2070
2071         /* time stamp */
2072         {
2073                 struct ceph_timespec ts;
2074                 ceph_encode_timespec(&ts, &req->r_stamp);
2075                 ceph_encode_copy(&p, &ts, sizeof(ts));
2076         }
2077
2078         BUG_ON(p > end);
2079         msg->front.iov_len = p - msg->front.iov_base;
2080         msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
2081
2082         if (req->r_pagelist) {
2083                 struct ceph_pagelist *pagelist = req->r_pagelist;
2084                 refcount_inc(&pagelist->refcnt);
2085                 ceph_msg_data_add_pagelist(msg, pagelist);
2086                 msg->hdr.data_len = cpu_to_le32(pagelist->length);
2087         } else {
2088                 msg->hdr.data_len = 0;
2089         }
2090
2091         msg->hdr.data_off = cpu_to_le16(0);
2092
2093 out_free2:
2094         if (freepath2)
2095                 kfree((char *)path2);
2096 out_free1:
2097         if (freepath1)
2098                 kfree((char *)path1);
2099 out:
2100         return msg;
2101 }
2102
2103 /*
2104  * called under mdsc->mutex if error, under no mutex if
2105  * success.
2106  */
2107 static void complete_request(struct ceph_mds_client *mdsc,
2108                              struct ceph_mds_request *req)
2109 {
2110         if (req->r_callback)
2111                 req->r_callback(mdsc, req);
2112         else
2113                 complete_all(&req->r_completion);
2114 }
2115
2116 /*
2117  * called under mdsc->mutex
2118  */
2119 static int __prepare_send_request(struct ceph_mds_client *mdsc,
2120                                   struct ceph_mds_request *req,
2121                                   int mds, bool drop_cap_releases)
2122 {
2123         struct ceph_mds_request_head *rhead;
2124         struct ceph_msg *msg;
2125         int flags = 0;
2126
2127         req->r_attempts++;
2128         if (req->r_inode) {
2129                 struct ceph_cap *cap =
2130                         ceph_get_cap_for_mds(ceph_inode(req->r_inode), mds);
2131
2132                 if (cap)
2133                         req->r_sent_on_mseq = cap->mseq;
2134                 else
2135                         req->r_sent_on_mseq = -1;
2136         }
2137         dout("prepare_send_request %p tid %lld %s (attempt %d)\n", req,
2138              req->r_tid, ceph_mds_op_name(req->r_op), req->r_attempts);
2139
2140         if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
2141                 void *p;
2142                 /*
2143                  * Replay.  Do not regenerate message (and rebuild
2144                  * paths, etc.); just use the original message.
2145                  * Rebuilding paths will break for renames because
2146                  * d_move mangles the src name.
2147                  */
2148                 msg = req->r_request;
2149                 rhead = msg->front.iov_base;
2150
2151                 flags = le32_to_cpu(rhead->flags);
2152                 flags |= CEPH_MDS_FLAG_REPLAY;
2153                 rhead->flags = cpu_to_le32(flags);
2154
2155                 if (req->r_target_inode)
2156                         rhead->ino = cpu_to_le64(ceph_ino(req->r_target_inode));
2157
2158                 rhead->num_retry = req->r_attempts - 1;
2159
2160                 /* remove cap/dentry releases from message */
2161                 rhead->num_releases = 0;
2162
2163                 /* time stamp */
2164                 p = msg->front.iov_base + req->r_request_release_offset;
2165                 {
2166                         struct ceph_timespec ts;
2167                         ceph_encode_timespec(&ts, &req->r_stamp);
2168                         ceph_encode_copy(&p, &ts, sizeof(ts));
2169                 }
2170
2171                 msg->front.iov_len = p - msg->front.iov_base;
2172                 msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
2173                 return 0;
2174         }
2175
2176         if (req->r_request) {
2177                 ceph_msg_put(req->r_request);
2178                 req->r_request = NULL;
2179         }
2180         msg = create_request_message(mdsc, req, mds, drop_cap_releases);
2181         if (IS_ERR(msg)) {
2182                 req->r_err = PTR_ERR(msg);
2183                 return PTR_ERR(msg);
2184         }
2185         req->r_request = msg;
2186
2187         rhead = msg->front.iov_base;
2188         rhead->oldest_client_tid = cpu_to_le64(__get_oldest_tid(mdsc));
2189         if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags))
2190                 flags |= CEPH_MDS_FLAG_REPLAY;
2191         if (req->r_parent)
2192                 flags |= CEPH_MDS_FLAG_WANT_DENTRY;
2193         rhead->flags = cpu_to_le32(flags);
2194         rhead->num_fwd = req->r_num_fwd;
2195         rhead->num_retry = req->r_attempts - 1;
2196         rhead->ino = 0;
2197
2198         dout(" r_parent = %p\n", req->r_parent);
2199         return 0;
2200 }
2201
2202 /*
2203  * send request, or put it on the appropriate wait list.
2204  */
2205 static int __do_request(struct ceph_mds_client *mdsc,
2206                         struct ceph_mds_request *req)
2207 {
2208         struct ceph_mds_session *session = NULL;
2209         int mds = -1;
2210         int err = 0;
2211
2212         if (req->r_err || test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags)) {
2213                 if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags))
2214                         __unregister_request(mdsc, req);
2215                 goto out;
2216         }
2217
2218         if (req->r_timeout &&
2219             time_after_eq(jiffies, req->r_started + req->r_timeout)) {
2220                 dout("do_request timed out\n");
2221                 err = -EIO;
2222                 goto finish;
2223         }
2224         if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) {
2225                 dout("do_request forced umount\n");
2226                 err = -EIO;
2227                 goto finish;
2228         }
2229         if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_MOUNTING) {
2230                 if (mdsc->mdsmap_err) {
2231                         err = mdsc->mdsmap_err;
2232                         dout("do_request mdsmap err %d\n", err);
2233                         goto finish;
2234                 }
2235                 if (mdsc->mdsmap->m_epoch == 0) {
2236                         dout("do_request no mdsmap, waiting for map\n");
2237                         list_add(&req->r_wait, &mdsc->waiting_for_map);
2238                         goto finish;
2239                 }
2240                 if (!(mdsc->fsc->mount_options->flags &
2241                       CEPH_MOUNT_OPT_MOUNTWAIT) &&
2242                     !ceph_mdsmap_is_cluster_available(mdsc->mdsmap)) {
2243                         err = -ENOENT;
2244                         pr_info("probably no mds server is up\n");
2245                         goto finish;
2246                 }
2247         }
2248
2249         put_request_session(req);
2250
2251         mds = __choose_mds(mdsc, req);
2252         if (mds < 0 ||
2253             ceph_mdsmap_get_state(mdsc->mdsmap, mds) < CEPH_MDS_STATE_ACTIVE) {
2254                 dout("do_request no mds or not active, waiting for map\n");
2255                 list_add(&req->r_wait, &mdsc->waiting_for_map);
2256                 goto out;
2257         }
2258
2259         /* get, open session */
2260         session = __ceph_lookup_mds_session(mdsc, mds);
2261         if (!session) {
2262                 session = register_session(mdsc, mds);
2263                 if (IS_ERR(session)) {
2264                         err = PTR_ERR(session);
2265                         goto finish;
2266                 }
2267         }
2268         req->r_session = get_session(session);
2269
2270         dout("do_request mds%d session %p state %s\n", mds, session,
2271              ceph_session_state_name(session->s_state));
2272         if (session->s_state != CEPH_MDS_SESSION_OPEN &&
2273             session->s_state != CEPH_MDS_SESSION_HUNG) {
2274                 if (session->s_state == CEPH_MDS_SESSION_REJECTED) {
2275                         err = -EACCES;
2276                         goto out_session;
2277                 }
2278                 if (session->s_state == CEPH_MDS_SESSION_NEW ||
2279                     session->s_state == CEPH_MDS_SESSION_CLOSING)
2280                         __open_session(mdsc, session);
2281                 list_add(&req->r_wait, &session->s_waiting);
2282                 goto out_session;
2283         }
2284
2285         /* send request */
2286         req->r_resend_mds = -1;   /* forget any previous mds hint */
2287
2288         if (req->r_request_started == 0)   /* note request start time */
2289                 req->r_request_started = jiffies;
2290
2291         err = __prepare_send_request(mdsc, req, mds, false);
2292         if (!err) {
2293                 ceph_msg_get(req->r_request);
2294                 ceph_con_send(&session->s_con, req->r_request);
2295         }
2296
2297 out_session:
2298         ceph_put_mds_session(session);
2299 finish:
2300         if (err) {
2301                 dout("__do_request early error %d\n", err);
2302                 req->r_err = err;
2303                 complete_request(mdsc, req);
2304                 __unregister_request(mdsc, req);
2305         }
2306 out:
2307         return err;
2308 }
2309
2310 /*
2311  * called under mdsc->mutex
2312  */
2313 static void __wake_requests(struct ceph_mds_client *mdsc,
2314                             struct list_head *head)
2315 {
2316         struct ceph_mds_request *req;
2317         LIST_HEAD(tmp_list);
2318
2319         list_splice_init(head, &tmp_list);
2320
2321         while (!list_empty(&tmp_list)) {
2322                 req = list_entry(tmp_list.next,
2323                                  struct ceph_mds_request, r_wait);
2324                 list_del_init(&req->r_wait);
2325                 dout(" wake request %p tid %llu\n", req, req->r_tid);
2326                 __do_request(mdsc, req);
2327         }
2328 }
2329
2330 /*
2331  * Wake up threads with requests pending for @mds, so that they can
2332  * resubmit their requests to a possibly different mds.
2333  */
2334 static void kick_requests(struct ceph_mds_client *mdsc, int mds)
2335 {
2336         struct ceph_mds_request *req;
2337         struct rb_node *p = rb_first(&mdsc->request_tree);
2338
2339         dout("kick_requests mds%d\n", mds);
2340         while (p) {
2341                 req = rb_entry(p, struct ceph_mds_request, r_node);
2342                 p = rb_next(p);
2343                 if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags))
2344                         continue;
2345                 if (req->r_attempts > 0)
2346                         continue; /* only new requests */
2347                 if (req->r_session &&
2348                     req->r_session->s_mds == mds) {
2349                         dout(" kicking tid %llu\n", req->r_tid);
2350                         list_del_init(&req->r_wait);
2351                         __do_request(mdsc, req);
2352                 }
2353         }
2354 }
2355
2356 void ceph_mdsc_submit_request(struct ceph_mds_client *mdsc,
2357                               struct ceph_mds_request *req)
2358 {
2359         dout("submit_request on %p\n", req);
2360         mutex_lock(&mdsc->mutex);
2361         __register_request(mdsc, req, NULL);
2362         __do_request(mdsc, req);
2363         mutex_unlock(&mdsc->mutex);
2364 }
2365
2366 /*
2367  * Synchronously perform an mds request.  Take care of all of the
2368  * session setup, forwarding, retry details.
2369  */
2370 int ceph_mdsc_do_request(struct ceph_mds_client *mdsc,
2371                          struct inode *dir,
2372                          struct ceph_mds_request *req)
2373 {
2374         int err;
2375
2376         dout("do_request on %p\n", req);
2377
2378         /* take CAP_PIN refs for r_inode, r_parent, r_old_dentry */
2379         if (req->r_inode)
2380                 ceph_get_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
2381         if (req->r_parent)
2382                 ceph_get_cap_refs(ceph_inode(req->r_parent), CEPH_CAP_PIN);
2383         if (req->r_old_dentry_dir)
2384                 ceph_get_cap_refs(ceph_inode(req->r_old_dentry_dir),
2385                                   CEPH_CAP_PIN);
2386
2387         /* issue */
2388         mutex_lock(&mdsc->mutex);
2389         __register_request(mdsc, req, dir);
2390         __do_request(mdsc, req);
2391
2392         if (req->r_err) {
2393                 err = req->r_err;
2394                 goto out;
2395         }
2396
2397         /* wait */
2398         mutex_unlock(&mdsc->mutex);
2399         dout("do_request waiting\n");
2400         if (!req->r_timeout && req->r_wait_for_completion) {
2401                 err = req->r_wait_for_completion(mdsc, req);
2402         } else {
2403                 long timeleft = wait_for_completion_killable_timeout(
2404                                         &req->r_completion,
2405                                         ceph_timeout_jiffies(req->r_timeout));
2406                 if (timeleft > 0)
2407                         err = 0;
2408                 else if (!timeleft)
2409                         err = -EIO;  /* timed out */
2410                 else
2411                         err = timeleft;  /* killed */
2412         }
2413         dout("do_request waited, got %d\n", err);
2414         mutex_lock(&mdsc->mutex);
2415
2416         /* only abort if we didn't race with a real reply */
2417         if (test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags)) {
2418                 err = le32_to_cpu(req->r_reply_info.head->result);
2419         } else if (err < 0) {
2420                 dout("aborted request %lld with %d\n", req->r_tid, err);
2421
2422                 /*
2423                  * ensure we aren't running concurrently with
2424                  * ceph_fill_trace or ceph_readdir_prepopulate, which
2425                  * rely on locks (dir mutex) held by our caller.
2426                  */
2427                 mutex_lock(&req->r_fill_mutex);
2428                 req->r_err = err;
2429                 set_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags);
2430                 mutex_unlock(&req->r_fill_mutex);
2431
2432                 if (req->r_parent &&
2433                     (req->r_op & CEPH_MDS_OP_WRITE))
2434                         ceph_invalidate_dir_request(req);
2435         } else {
2436                 err = req->r_err;
2437         }
2438
2439 out:
2440         mutex_unlock(&mdsc->mutex);
2441         dout("do_request %p done, result %d\n", req, err);
2442         return err;
2443 }
2444
2445 /*
2446  * Invalidate dir's completeness, dentry lease state on an aborted MDS
2447  * namespace request.
2448  */
2449 void ceph_invalidate_dir_request(struct ceph_mds_request *req)
2450 {
2451         struct inode *dir = req->r_parent;
2452         struct inode *old_dir = req->r_old_dentry_dir;
2453
2454         dout("invalidate_dir_request %p %p (complete, lease(s))\n", dir, old_dir);
2455
2456         ceph_dir_clear_complete(dir);
2457         if (old_dir)
2458                 ceph_dir_clear_complete(old_dir);
2459         if (req->r_dentry)
2460                 ceph_invalidate_dentry_lease(req->r_dentry);
2461         if (req->r_old_dentry)
2462                 ceph_invalidate_dentry_lease(req->r_old_dentry);
2463 }
2464
2465 /*
2466  * Handle mds reply.
2467  *
2468  * We take the session mutex and parse and process the reply immediately.
2469  * This preserves the logical ordering of replies, capabilities, etc., sent
2470  * by the MDS as they are applied to our local cache.
2471  */
2472 static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
2473 {
2474         struct ceph_mds_client *mdsc = session->s_mdsc;
2475         struct ceph_mds_request *req;
2476         struct ceph_mds_reply_head *head = msg->front.iov_base;
2477         struct ceph_mds_reply_info_parsed *rinfo;  /* parsed reply info */
2478         struct ceph_snap_realm *realm;
2479         u64 tid;
2480         int err, result;
2481         int mds = session->s_mds;
2482
2483         if (msg->front.iov_len < sizeof(*head)) {
2484                 pr_err("mdsc_handle_reply got corrupt (short) reply\n");
2485                 ceph_msg_dump(msg);
2486                 return;
2487         }
2488
2489         /* get request, session */
2490         tid = le64_to_cpu(msg->hdr.tid);
2491         mutex_lock(&mdsc->mutex);
2492         req = lookup_get_request(mdsc, tid);
2493         if (!req) {
2494                 dout("handle_reply on unknown tid %llu\n", tid);
2495                 mutex_unlock(&mdsc->mutex);
2496                 return;
2497         }
2498         dout("handle_reply %p\n", req);
2499
2500         /* correct session? */
2501         if (req->r_session != session) {
2502                 pr_err("mdsc_handle_reply got %llu on session mds%d"
2503                        " not mds%d\n", tid, session->s_mds,
2504                        req->r_session ? req->r_session->s_mds : -1);
2505                 mutex_unlock(&mdsc->mutex);
2506                 goto out;
2507         }
2508
2509         /* dup? */
2510         if ((test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags) && !head->safe) ||
2511             (test_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags) && head->safe)) {
2512                 pr_warn("got a dup %s reply on %llu from mds%d\n",
2513                            head->safe ? "safe" : "unsafe", tid, mds);
2514                 mutex_unlock(&mdsc->mutex);
2515                 goto out;
2516         }
2517         if (test_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags)) {
2518                 pr_warn("got unsafe after safe on %llu from mds%d\n",
2519                            tid, mds);
2520                 mutex_unlock(&mdsc->mutex);
2521                 goto out;
2522         }
2523
2524         result = le32_to_cpu(head->result);
2525
2526         /*
2527          * Handle an ESTALE
2528          * if we're not talking to the authority, send to them
2529          * if the authority has changed while we weren't looking,
2530          * send to new authority
2531          * Otherwise we just have to return an ESTALE
2532          */
2533         if (result == -ESTALE) {
2534                 dout("got ESTALE on request %llu\n", req->r_tid);
2535                 req->r_resend_mds = -1;
2536                 if (req->r_direct_mode != USE_AUTH_MDS) {
2537                         dout("not using auth, setting for that now\n");
2538                         req->r_direct_mode = USE_AUTH_MDS;
2539                         __do_request(mdsc, req);
2540                         mutex_unlock(&mdsc->mutex);
2541                         goto out;
2542                 } else  {
2543                         int mds = __choose_mds(mdsc, req);
2544                         if (mds >= 0 && mds != req->r_session->s_mds) {
2545                                 dout("but auth changed, so resending\n");
2546                                 __do_request(mdsc, req);
2547                                 mutex_unlock(&mdsc->mutex);
2548                                 goto out;
2549                         }
2550                 }
2551                 dout("have to return ESTALE on request %llu\n", req->r_tid);
2552         }
2553
2554
2555         if (head->safe) {
2556                 set_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags);
2557                 __unregister_request(mdsc, req);
2558
2559                 if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
2560                         /*
2561                          * We already handled the unsafe response, now do the
2562                          * cleanup.  No need to examine the response; the MDS
2563                          * doesn't include any result info in the safe
2564                          * response.  And even if it did, there is nothing
2565                          * useful we could do with a revised return value.
2566                          */
2567                         dout("got safe reply %llu, mds%d\n", tid, mds);
2568
2569                         /* last unsafe request during umount? */
2570                         if (mdsc->stopping && !__get_oldest_req(mdsc))
2571                                 complete_all(&mdsc->safe_umount_waiters);
2572                         mutex_unlock(&mdsc->mutex);
2573                         goto out;
2574                 }
2575         } else {
2576                 set_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags);
2577                 list_add_tail(&req->r_unsafe_item, &req->r_session->s_unsafe);
2578                 if (req->r_unsafe_dir) {
2579                         struct ceph_inode_info *ci =
2580                                         ceph_inode(req->r_unsafe_dir);
2581                         spin_lock(&ci->i_unsafe_lock);
2582                         list_add_tail(&req->r_unsafe_dir_item,
2583                                       &ci->i_unsafe_dirops);
2584                         spin_unlock(&ci->i_unsafe_lock);
2585                 }
2586         }
2587
2588         dout("handle_reply tid %lld result %d\n", tid, result);
2589         rinfo = &req->r_reply_info;
2590         err = parse_reply_info(msg, rinfo, session->s_con.peer_features);
2591         mutex_unlock(&mdsc->mutex);
2592
2593         mutex_lock(&session->s_mutex);
2594         if (err < 0) {
2595                 pr_err("mdsc_handle_reply got corrupt reply mds%d(tid:%lld)\n", mds, tid);
2596                 ceph_msg_dump(msg);
2597                 goto out_err;
2598         }
2599
2600         /* snap trace */
2601         realm = NULL;
2602         if (rinfo->snapblob_len) {
2603                 down_write(&mdsc->snap_rwsem);
2604                 ceph_update_snap_trace(mdsc, rinfo->snapblob,
2605                                 rinfo->snapblob + rinfo->snapblob_len,
2606                                 le32_to_cpu(head->op) == CEPH_MDS_OP_RMSNAP,
2607                                 &realm);
2608                 downgrade_write(&mdsc->snap_rwsem);
2609         } else {
2610                 down_read(&mdsc->snap_rwsem);
2611         }
2612
2613         /* insert trace into our cache */
2614         mutex_lock(&req->r_fill_mutex);
2615         current->journal_info = req;
2616         err = ceph_fill_trace(mdsc->fsc->sb, req);
2617         if (err == 0) {
2618                 if (result == 0 && (req->r_op == CEPH_MDS_OP_READDIR ||
2619                                     req->r_op == CEPH_MDS_OP_LSSNAP))
2620                         ceph_readdir_prepopulate(req, req->r_session);
2621                 ceph_unreserve_caps(mdsc, &req->r_caps_reservation);
2622         }
2623         current->journal_info = NULL;
2624         mutex_unlock(&req->r_fill_mutex);
2625
2626         up_read(&mdsc->snap_rwsem);
2627         if (realm)
2628                 ceph_put_snap_realm(mdsc, realm);
2629
2630         if (err == 0 && req->r_target_inode &&
2631             test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
2632                 struct ceph_inode_info *ci = ceph_inode(req->r_target_inode);
2633                 spin_lock(&ci->i_unsafe_lock);
2634                 list_add_tail(&req->r_unsafe_target_item, &ci->i_unsafe_iops);
2635                 spin_unlock(&ci->i_unsafe_lock);
2636         }
2637 out_err:
2638         mutex_lock(&mdsc->mutex);
2639         if (!test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
2640                 if (err) {
2641                         req->r_err = err;
2642                 } else {
2643                         req->r_reply =  ceph_msg_get(msg);
2644                         set_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags);
2645                 }
2646         } else {
2647                 dout("reply arrived after request %lld was aborted\n", tid);
2648         }
2649         mutex_unlock(&mdsc->mutex);
2650
2651         mutex_unlock(&session->s_mutex);
2652
2653         /* kick calling process */
2654         complete_request(mdsc, req);
2655 out:
2656         ceph_mdsc_put_request(req);
2657         return;
2658 }
2659
2660
2661
2662 /*
2663  * handle mds notification that our request has been forwarded.
2664  */
2665 static void handle_forward(struct ceph_mds_client *mdsc,
2666                            struct ceph_mds_session *session,
2667                            struct ceph_msg *msg)
2668 {
2669         struct ceph_mds_request *req;
2670         u64 tid = le64_to_cpu(msg->hdr.tid);
2671         u32 next_mds;
2672         u32 fwd_seq;
2673         int err = -EINVAL;
2674         void *p = msg->front.iov_base;
2675         void *end = p + msg->front.iov_len;
2676
2677         ceph_decode_need(&p, end, 2*sizeof(u32), bad);
2678         next_mds = ceph_decode_32(&p);
2679         fwd_seq = ceph_decode_32(&p);
2680
2681         mutex_lock(&mdsc->mutex);
2682         req = lookup_get_request(mdsc, tid);
2683         if (!req) {
2684                 dout("forward tid %llu to mds%d - req dne\n", tid, next_mds);
2685                 goto out;  /* dup reply? */
2686         }
2687
2688         if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
2689                 dout("forward tid %llu aborted, unregistering\n", tid);
2690                 __unregister_request(mdsc, req);
2691         } else if (fwd_seq <= req->r_num_fwd) {
2692                 dout("forward tid %llu to mds%d - old seq %d <= %d\n",
2693                      tid, next_mds, req->r_num_fwd, fwd_seq);
2694         } else {
2695                 /* resend. forward race not possible; mds would drop */
2696                 dout("forward tid %llu to mds%d (we resend)\n", tid, next_mds);
2697                 BUG_ON(req->r_err);
2698                 BUG_ON(test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags));
2699                 req->r_attempts = 0;
2700                 req->r_num_fwd = fwd_seq;
2701                 req->r_resend_mds = next_mds;
2702                 put_request_session(req);
2703                 __do_request(mdsc, req);
2704         }
2705         ceph_mdsc_put_request(req);
2706 out:
2707         mutex_unlock(&mdsc->mutex);
2708         return;
2709
2710 bad:
2711         pr_err("mdsc_handle_forward decode error err=%d\n", err);
2712 }
2713
2714 /*
2715  * handle a mds session control message
2716  */
2717 static void handle_session(struct ceph_mds_session *session,
2718                            struct ceph_msg *msg)
2719 {
2720         struct ceph_mds_client *mdsc = session->s_mdsc;
2721         u32 op;
2722         u64 seq;
2723         int mds = session->s_mds;
2724         struct ceph_mds_session_head *h = msg->front.iov_base;
2725         int wake = 0;
2726
2727         /* decode */
2728         if (msg->front.iov_len != sizeof(*h))
2729                 goto bad;
2730         op = le32_to_cpu(h->op);
2731         seq = le64_to_cpu(h->seq);
2732
2733         mutex_lock(&mdsc->mutex);
2734         if (op == CEPH_SESSION_CLOSE) {
2735                 get_session(session);
2736                 __unregister_session(mdsc, session);
2737         }
2738         /* FIXME: this ttl calculation is generous */
2739         session->s_ttl = jiffies + HZ*mdsc->mdsmap->m_session_autoclose;
2740         mutex_unlock(&mdsc->mutex);
2741
2742         mutex_lock(&session->s_mutex);
2743
2744         dout("handle_session mds%d %s %p state %s seq %llu\n",
2745              mds, ceph_session_op_name(op), session,
2746              ceph_session_state_name(session->s_state), seq);
2747
2748         if (session->s_state == CEPH_MDS_SESSION_HUNG) {
2749                 session->s_state = CEPH_MDS_SESSION_OPEN;
2750                 pr_info("mds%d came back\n", session->s_mds);
2751         }
2752
2753         switch (op) {
2754         case CEPH_SESSION_OPEN:
2755                 if (session->s_state == CEPH_MDS_SESSION_RECONNECTING)
2756                         pr_info("mds%d reconnect success\n", session->s_mds);
2757                 session->s_state = CEPH_MDS_SESSION_OPEN;
2758                 renewed_caps(mdsc, session, 0);
2759                 wake = 1;
2760                 if (mdsc->stopping)
2761                         __close_session(mdsc, session);
2762                 break;
2763
2764         case CEPH_SESSION_RENEWCAPS:
2765                 if (session->s_renew_seq == seq)
2766                         renewed_caps(mdsc, session, 1);
2767                 break;
2768
2769         case CEPH_SESSION_CLOSE:
2770                 if (session->s_state == CEPH_MDS_SESSION_RECONNECTING)
2771                         pr_info("mds%d reconnect denied\n", session->s_mds);
2772                 cleanup_session_requests(mdsc, session);
2773                 remove_session_caps(session);
2774                 wake = 2; /* for good measure */
2775                 wake_up_all(&mdsc->session_close_wq);
2776                 break;
2777
2778         case CEPH_SESSION_STALE:
2779                 pr_info("mds%d caps went stale, renewing\n",
2780                         session->s_mds);
2781                 spin_lock(&session->s_gen_ttl_lock);
2782                 session->s_cap_gen++;
2783                 session->s_cap_ttl = jiffies - 1;
2784                 spin_unlock(&session->s_gen_ttl_lock);
2785                 send_renew_caps(mdsc, session);
2786                 break;
2787
2788         case CEPH_SESSION_RECALL_STATE:
2789                 ceph_trim_caps(mdsc, session, le32_to_cpu(h->max_caps));
2790                 break;
2791
2792         case CEPH_SESSION_FLUSHMSG:
2793                 send_flushmsg_ack(mdsc, session, seq);
2794                 break;
2795
2796         case CEPH_SESSION_FORCE_RO:
2797                 dout("force_session_readonly %p\n", session);
2798                 spin_lock(&session->s_cap_lock);
2799                 session->s_readonly = true;
2800                 spin_unlock(&session->s_cap_lock);
2801                 wake_up_session_caps(session, 0);
2802                 break;
2803
2804         case CEPH_SESSION_REJECT:
2805                 WARN_ON(session->s_state != CEPH_MDS_SESSION_OPENING);
2806                 pr_info("mds%d rejected session\n", session->s_mds);
2807                 session->s_state = CEPH_MDS_SESSION_REJECTED;
2808                 cleanup_session_requests(mdsc, session);
2809                 remove_session_caps(session);
2810                 wake = 2; /* for good measure */
2811                 break;
2812
2813         default:
2814                 pr_err("mdsc_handle_session bad op %d mds%d\n", op, mds);
2815                 WARN_ON(1);
2816         }
2817
2818         mutex_unlock(&session->s_mutex);
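        /*
         * wake == 1: requeue requests waiting on this session;
         * wake == 2 (close/reject): also kick not-yet-sent requests so they
         * can be resubmitted to a possibly different mds.
         */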
2819         if (wake) {
2820                 mutex_lock(&mdsc->mutex);
2821                 __wake_requests(mdsc, &session->s_waiting);
2822                 if (wake == 2)
2823                         kick_requests(mdsc, mds);
2824                 mutex_unlock(&mdsc->mutex);
2825         }
2826         if (op == CEPH_SESSION_CLOSE)
2827                 ceph_put_mds_session(session);
2828         return;
2829
2830 bad:
2831         pr_err("mdsc_handle_session corrupt message mds%d len %d\n", mds,
2832                (int)msg->front.iov_len);
2833         ceph_msg_dump(msg);
2834         return;
2835 }
2836
2837
2838 /*
2839  * called under session->mutex.
2840  */
2841 static void replay_unsafe_requests(struct ceph_mds_client *mdsc,
2842                                    struct ceph_mds_session *session)
2843 {
2844         struct ceph_mds_request *req, *nreq;
2845         struct rb_node *p;
2846         int err;
2847
2848         dout("replay_unsafe_requests mds%d\n", session->s_mds);
2849
2850         mutex_lock(&mdsc->mutex);
2851         list_for_each_entry_safe(req, nreq, &session->s_unsafe, r_unsafe_item) {
2852                 err = __prepare_send_request(mdsc, req, session->s_mds, true);
2853                 if (!err) {
2854                         ceph_msg_get(req->r_request);
2855                         ceph_con_send(&session->s_con, req->r_request);
2856                 }
2857         }
2858
2859         /*
2860          * Also re-send old requests when the MDS enters the reconnect stage,
2861          * so that the MDS can process completed requests in its clientreplay stage.
2862          */
2863         p = rb_first(&mdsc->request_tree);
2864         while (p) {
2865                 req = rb_entry(p, struct ceph_mds_request, r_node);
2866                 p = rb_next(p);
2867                 if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags))
2868                         continue;
2869                 if (req->r_attempts == 0)
2870                         continue; /* only old requests */
2871                 if (req->r_session &&
2872                     req->r_session->s_mds == session->s_mds) {
2873                         err = __prepare_send_request(mdsc, req,
2874                                                      session->s_mds, true);
2875                         if (!err) {
2876                                 ceph_msg_get(req->r_request);
2877                                 ceph_con_send(&session->s_con, req->r_request);
2878                         }
2879                 }
2880         }
2881         mutex_unlock(&mdsc->mutex);
2882 }
2883
2884 /*
2885  * Encode information about a cap for a reconnect with the MDS.
2886  */
2887 static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
2888                           void *arg)
2889 {
2890         union {
2891                 struct ceph_mds_cap_reconnect v2;
2892                 struct ceph_mds_cap_reconnect_v1 v1;
2893         } rec;
2894         struct ceph_inode_info *ci = cap->ci;
2895         struct ceph_reconnect_state *recon_state = arg;
2896         struct ceph_pagelist *pagelist = recon_state->pagelist;
2897         char *path;
2898         int pathlen, err;
2899         u64 pathbase;
2900         u64 snap_follows;
2901         struct dentry *dentry;
2902
2903         dout(" adding %p ino %llx.%llx cap %p %lld %s\n",
2904              inode, ceph_vinop(inode), cap, cap->cap_id,
2905              ceph_cap_string(cap->issued));
2906         err = ceph_pagelist_encode_64(pagelist, ceph_ino(inode));
2907         if (err)
2908                 return err;
2909
2910         dentry = d_find_alias(inode);
2911         if (dentry) {
2912                 path = ceph_mdsc_build_path(dentry, &pathlen, &pathbase, 0);
2913                 if (IS_ERR(path)) {
2914                         err = PTR_ERR(path);
2915                         goto out_dput;
2916                 }
2917         } else {
2918                 path = NULL;
2919                 pathlen = 0;
2920                 pathbase = 0;
2921         }
2922
2923         spin_lock(&ci->i_ceph_lock);
2924         cap->seq = 0;        /* reset cap seq */
2925         cap->issue_seq = 0;  /* and issue_seq */
2926         cap->mseq = 0;       /* and migrate_seq */
2927         cap->cap_gen = cap->session->s_cap_gen;
2928
2929         if (recon_state->msg_version >= 2) {
2930                 rec.v2.cap_id = cpu_to_le64(cap->cap_id);
2931                 rec.v2.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
2932                 rec.v2.issued = cpu_to_le32(cap->issued);
2933                 rec.v2.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
2934                 rec.v2.pathbase = cpu_to_le64(pathbase);
2935                 rec.v2.flock_len = (__force __le32)
2936                         ((ci->i_ceph_flags & CEPH_I_ERROR_FILELOCK) ? 0 : 1);
2937         } else {
2938                 rec.v1.cap_id = cpu_to_le64(cap->cap_id);
2939                 rec.v1.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
2940                 rec.v1.issued = cpu_to_le32(cap->issued);
2941                 rec.v1.size = cpu_to_le64(inode->i_size);
2942                 ceph_encode_timespec(&rec.v1.mtime, &inode->i_mtime);
2943                 ceph_encode_timespec(&rec.v1.atime, &inode->i_atime);
2944                 rec.v1.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
2945                 rec.v1.pathbase = cpu_to_le64(pathbase);
2946         }
2947
2948         if (list_empty(&ci->i_cap_snaps)) {
2949                 snap_follows = ci->i_head_snapc ? ci->i_head_snapc->seq : 0;
2950         } else {
2951                 struct ceph_cap_snap *capsnap =
2952                         list_first_entry(&ci->i_cap_snaps,
2953                                          struct ceph_cap_snap, ci_item);
2954                 snap_follows = capsnap->follows;
2955         }
2956         spin_unlock(&ci->i_ceph_lock);
2957
2958         if (recon_state->msg_version >= 2) {
2959                 int num_fcntl_locks, num_flock_locks;
2960                 struct ceph_filelock *flocks = NULL;
2961                 size_t struct_len, total_len = 0;
2962                 u8 struct_v = 0;
2963
2964 encode_again:
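                     /*
                      * The lock count can change between counting and
                      * encoding; ceph_encode_locks_to_buffer() then returns
                      * -ENOSPC and we jump back to encode_again to retry
                      * with a fresh count.
                      */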
2965                 if (rec.v2.flock_len) {
2966                         ceph_count_locks(inode, &num_fcntl_locks, &num_flock_locks);
2967                 } else {
2968                         num_fcntl_locks = 0;
2969                         num_flock_locks = 0;
2970                 }
2971                 if (num_fcntl_locks + num_flock_locks > 0) {
2972                         flocks = kmalloc_array(num_fcntl_locks + num_flock_locks,
2973                                                sizeof(struct ceph_filelock), GFP_NOFS);
2974                         if (!flocks) {
2975                                 err = -ENOMEM;
2976                                 goto out_free;
2977                         }
2978                         err = ceph_encode_locks_to_buffer(inode, flocks,
2979                                                           num_fcntl_locks,
2980                                                           num_flock_locks);
2981                         if (err) {
2982                                 kfree(flocks);
2983                                 flocks = NULL;
2984                                 if (err == -ENOSPC)
2985                                         goto encode_again;
2986                                 goto out_free;
2987                         }
2988                 } else {
2989                         kfree(flocks);
2990                         flocks = NULL;
2991                 }
2992
2993                 if (recon_state->msg_version >= 3) {
2994                         /* version, compat_version and struct_len */
2995                         total_len = 2 * sizeof(u8) + sizeof(u32);
2996                         struct_v = 2;
2997                 }
2998                 /*
2999                  * the number of encoded locks is now stable, so copy them to the pagelist
3000                  */
3001                 struct_len = 2 * sizeof(u32) +
3002                             (num_fcntl_locks + num_flock_locks) *
3003                             sizeof(struct ceph_filelock);
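                     /* flock_len is the byte length of the lock data appended after rec.v2 */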
3004                 rec.v2.flock_len = cpu_to_le32(struct_len);
3005
3006                 struct_len += sizeof(rec.v2);
3007                 struct_len += sizeof(u32) + pathlen;
3008
3009                 if (struct_v >= 2)
3010                         struct_len += sizeof(u64); /* snap_follows */
3011
3012                 total_len += struct_len;
3013                 err = ceph_pagelist_reserve(pagelist, total_len);
3014
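                     /*
                      * If the reservation succeeded, the encode calls below
                      * cannot run out of space, so their return values are
                      * ignored.
                      */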
3015                 if (!err) {
3016                         if (recon_state->msg_version >= 3) {
3017                                 ceph_pagelist_encode_8(pagelist, struct_v);
3018                                 ceph_pagelist_encode_8(pagelist, 1); /* compat_version */
3019                                 ceph_pagelist_encode_32(pagelist, struct_len);
3020                         }
3021                         ceph_pagelist_encode_string(pagelist, path, pathlen);
3022                         ceph_pagelist_append(pagelist, &rec, sizeof(rec.v2));
3023                         ceph_locks_to_pagelist(flocks, pagelist,
3024                                                num_fcntl_locks,
3025                                                num_flock_locks);
3026                         if (struct_v >= 2)
3027                                 ceph_pagelist_encode_64(pagelist, snap_follows);
3028                 }
3029                 kfree(flocks);
3030         } else {
3031                 size_t size = sizeof(u32) + pathlen + sizeof(rec.v1);
3032                 err = ceph_pagelist_reserve(pagelist, size);
3033                 if (!err) {
3034                         ceph_pagelist_encode_string(pagelist, path, pathlen);
3035                         ceph_pagelist_append(pagelist, &rec, sizeof(rec.v1));
3036                 }
3037         }
3038
3039         recon_state->nr_caps++;
3040 out_free:
3041         kfree(path);
3042 out_dput:
3043         dput(dentry);
3044         return err;
3045 }
3046
3047
3048 /*
3049  * If an MDS fails and recovers, clients need to reconnect in order to
3050  * reestablish shared state.  This includes all caps issued through
3051  * this session _and_ the snap_realm hierarchy.  Because it's not
3052  * clear which snap realms the mds cares about, we send everything we
3053  * know about.  That ensures we'll then get any new info the
3054  * recovering MDS might have.
3055  *
3056  * This is a relatively heavyweight operation, but it's rare.
3057  *
3058  * called with mdsc->mutex held.
3059  */
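     /*
      * Roughly, the reconnect payload built below is: a u32 cap count,
      * then one record per cap (ino, path, and a ceph_mds_cap_reconnect
      * or ceph_mds_cap_reconnect_v1, plus file lock data and snap_follows
      * for newer encodings), followed by a ceph_mds_snaprealm_reconnect
      * entry for each snap realm we know about.
      */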
3060 static void send_mds_reconnect(struct ceph_mds_client *mdsc,
3061                                struct ceph_mds_session *session)
3062 {
3063         struct ceph_msg *reply;
3064         struct rb_node *p;
3065         int mds = session->s_mds;
3066         int err = -ENOMEM;
3067         int s_nr_caps;
3068         struct ceph_pagelist *pagelist;
3069         struct ceph_reconnect_state recon_state;
3070         LIST_HEAD(dispose);
3071
3072         pr_info("mds%d reconnect start\n", mds);
3073
3074         pagelist = kmalloc(sizeof(*pagelist), GFP_NOFS);
3075         if (!pagelist)
3076                 goto fail_nopagelist;
3077         ceph_pagelist_init(pagelist);
3078
3079         reply = ceph_msg_new(CEPH_MSG_CLIENT_RECONNECT, 0, GFP_NOFS, false);
3080         if (!reply)
3081                 goto fail_nomsg;
3082
3083         mutex_lock(&session->s_mutex);
3084         session->s_state = CEPH_MDS_SESSION_RECONNECTING;
3085         session->s_seq = 0;
3086
3087         dout("session %p state %s\n", session,
3088              ceph_session_state_name(session->s_state));
3089
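             /*
              * Bump the cap generation so that caps and leases issued
              * before this reconnect are treated as stale.
              */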
3090         spin_lock(&session->s_gen_ttl_lock);
3091         session->s_cap_gen++;
3092         spin_unlock(&session->s_gen_ttl_lock);
3093
3094         spin_lock(&session->s_cap_lock);
3095         /* don't know if session is readonly */
3096         session->s_readonly = 0;
3097         /*
3098          * notify __ceph_remove_cap() that we are composing the cap reconnect.
3099          * If a cap gets released before being added to the cap reconnect,
3100          * __ceph_remove_cap() should skip queuing the cap release.
3101          */
3102         session->s_cap_reconnect = 1;
3103         /* drop old queued cap releases; we're about to reestablish that state */
3104         detach_cap_releases(session, &dispose);
3105         spin_unlock(&session->s_cap_lock);
3106         dispose_cap_releases(mdsc, &dispose);
3107
3108         /* trim unused caps to reduce MDS's cache rejoin time */
3109         if (mdsc->fsc->sb->s_root)
3110                 shrink_dcache_parent(mdsc->fsc->sb->s_root);
3111
3112         ceph_con_close(&session->s_con);
3113         ceph_con_open(&session->s_con,
3114                       CEPH_ENTITY_TYPE_MDS, mds,
3115                       ceph_mdsmap_get_addr(mdsc->mdsmap, mds));
3116
3117         /* replay unsafe requests */
3118         replay_unsafe_requests(mdsc, session);
3119
3120         down_read(&mdsc->snap_rwsem);
3121
3122         /* traverse this session's caps */
3123         s_nr_caps = session->s_nr_caps;
3124         err = ceph_pagelist_encode_32(pagelist, s_nr_caps);
3125         if (err)
3126                 goto fail;
3127
3128         recon_state.nr_caps = 0;
3129         recon_state.pagelist = pagelist;
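             /*
              * Pick the newest cap reconnect encoding the MDS supports:
              * v2 adds the file lock data, v3 wraps each cap record in a
              * versioned struct and appends snap_follows.
              */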
3130         if (session->s_con.peer_features & CEPH_FEATURE_MDSENC)
3131                 recon_state.msg_version = 3;
3132         else if (session->s_con.peer_features & CEPH_FEATURE_FLOCK)
3133                 recon_state.msg_version = 2;
3134         else
3135                 recon_state.msg_version = 1;
3136         err = iterate_session_caps(session, encode_caps_cb, &recon_state);
3137         if (err < 0)
3138                 goto fail;
3139
3140         spin_lock(&session->s_cap_lock);
3141         session->s_cap_reconnect = 0;
3142         spin_unlock(&session->s_cap_lock);
3143
3144         /*
3145          * snaprealms.  we provide mds with the ino, seq (version), and
3146          * parent for all of our realms.  If the mds has any newer info,
3147          * it will tell us.
3148          */
3149         for (p = rb_first(&mdsc->snap_realms); p; p = rb_next(p)) {
3150                 struct ceph_snap_realm *realm =
3151                         rb_entry(p, struct ceph_snap_realm, node);
3152                 struct ceph_mds_snaprealm_reconnect sr_rec;
3153
3154                 dout(" adding snap realm %llx seq %lld parent %llx\n",
3155                      realm->ino, realm->seq, realm->parent_ino);
3156                 sr_rec.ino = cpu_to_le64(realm->ino);
3157                 sr_rec.seq = cpu_to_le64(realm->seq);
3158                 sr_rec.parent = cpu_to_le64(realm->parent_ino);
3159                 err = ceph_pagelist_append(pagelist, &sr_rec, sizeof(sr_rec));
3160                 if (err)
3161                         goto fail;
3162         }
3163
3164         reply->hdr.version = cpu_to_le16(recon_state.msg_version);
3165
3166         /* raced with a cap release?  fix up the cap count encoded at the head of the pagelist */
3167         if (s_nr_caps != recon_state.nr_caps) {
3168                 struct page *page = list_first_entry(&pagelist->head,
3169                                                      struct page, lru);
3170                 __le32 *addr = kmap_atomic(page);
3171                 *addr = cpu_to_le32(recon_state.nr_caps);
3172                 kunmap_atomic(addr);
3173         }
3174
3175         reply->hdr.data_len = cpu_to_le32(pagelist->length);
3176         ceph_msg_data_add_pagelist(reply, pagelist);
3177
3178         ceph_early_kick_flushing_caps(mdsc, session);
3179
3180         ceph_con_send(&session->s_con, reply);
3181
3182         mutex_unlock(&session->s_mutex);
3183
3184         mutex_lock(&mdsc->mutex);
3185         __wake_requests(mdsc, &session->s_waiting);
3186         mutex_unlock(&mdsc->mutex);
3187
3188         up_read(&mdsc->snap_rwsem);
3189         return;
3190
3191 fail:
3192         ceph_msg_put(reply);
3193         up_read(&mdsc->snap_rwsem);
3194         mutex_unlock(&session->s_mutex);
3195 fail_nomsg:
3196         ceph_pagelist_release(pagelist);
3197 fail_nopagelist:
3198         pr_err("error %d preparing reconnect for mds%d\n", err, mds);
3199         return;
3200 }
3201
3202
3203 /*
3204  * compare old and new mdsmaps, kicking requests
3205  * and closing out old connections as necessary
3206  *
3207  * called under mdsc->mutex.
3208  */
3209 static void check_new_map(struct ceph_mds_client *mdsc,
3210                           struct ceph_mdsmap *newmap,
3211                           struct ceph_mdsmap *oldmap)
3212 {
3213