sparc64: Wire up compat getpeername and getsockname.
[sfrench/cifs-2.6.git] / fs / ceph / mds_client.c
1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/ceph/ceph_debug.h>
3
4 #include <linux/fs.h>
5 #include <linux/wait.h>
6 #include <linux/slab.h>
7 #include <linux/gfp.h>
8 #include <linux/sched.h>
9 #include <linux/debugfs.h>
10 #include <linux/seq_file.h>
11 #include <linux/ratelimit.h>
12
13 #include "super.h"
14 #include "mds_client.h"
15
16 #include <linux/ceph/ceph_features.h>
17 #include <linux/ceph/messenger.h>
18 #include <linux/ceph/decode.h>
19 #include <linux/ceph/pagelist.h>
20 #include <linux/ceph/auth.h>
21 #include <linux/ceph/debugfs.h>
22
23 /*
24  * A cluster of MDS (metadata server) daemons is responsible for
25  * managing the file system namespace (the directory hierarchy and
26  * inodes) and for coordinating shared access to storage.  Metadata is
27  * partitioning hierarchically across a number of servers, and that
28  * partition varies over time as the cluster adjusts the distribution
29  * in order to balance load.
30  *
31  * The MDS client is primarily responsible to managing synchronous
32  * metadata requests for operations like open, unlink, and so forth.
33  * If there is a MDS failure, we find out about it when we (possibly
34  * request and) receive a new MDS map, and can resubmit affected
35  * requests.
36  *
37  * For the most part, though, we take advantage of a lossless
38  * communications channel to the MDS, and do not need to worry about
39  * timing out or resubmitting requests.
40  *
41  * We maintain a stateful "session" with each MDS we interact with.
42  * Within each session, we sent periodic heartbeat messages to ensure
43  * any capabilities or leases we have been issues remain valid.  If
44  * the session times out and goes stale, our leases and capabilities
45  * are no longer valid.
46  */
47
48 struct ceph_reconnect_state {
49         int nr_caps;
50         struct ceph_pagelist *pagelist;
51         unsigned msg_version;
52 };
53
54 static void __wake_requests(struct ceph_mds_client *mdsc,
55                             struct list_head *head);
56
57 static const struct ceph_connection_operations mds_con_ops;
58
59
60 /*
61  * mds reply parsing
62  */
63
64 /*
65  * parse individual inode info
66  */
67 static int parse_reply_info_in(void **p, void *end,
68                                struct ceph_mds_reply_info_in *info,
69                                u64 features)
70 {
71         int err = -EIO;
72
73         info->in = *p;
74         *p += sizeof(struct ceph_mds_reply_inode) +
75                 sizeof(*info->in->fragtree.splits) *
76                 le32_to_cpu(info->in->fragtree.nsplits);
77
78         ceph_decode_32_safe(p, end, info->symlink_len, bad);
79         ceph_decode_need(p, end, info->symlink_len, bad);
80         info->symlink = *p;
81         *p += info->symlink_len;
82
83         if (features & CEPH_FEATURE_DIRLAYOUTHASH)
84                 ceph_decode_copy_safe(p, end, &info->dir_layout,
85                                       sizeof(info->dir_layout), bad);
86         else
87                 memset(&info->dir_layout, 0, sizeof(info->dir_layout));
88
89         ceph_decode_32_safe(p, end, info->xattr_len, bad);
90         ceph_decode_need(p, end, info->xattr_len, bad);
91         info->xattr_data = *p;
92         *p += info->xattr_len;
93
94         if (features & CEPH_FEATURE_MDS_INLINE_DATA) {
95                 ceph_decode_64_safe(p, end, info->inline_version, bad);
96                 ceph_decode_32_safe(p, end, info->inline_len, bad);
97                 ceph_decode_need(p, end, info->inline_len, bad);
98                 info->inline_data = *p;
99                 *p += info->inline_len;
100         } else
101                 info->inline_version = CEPH_INLINE_NONE;
102
103         if (features & CEPH_FEATURE_MDS_QUOTA) {
104                 u8 struct_v, struct_compat;
105                 u32 struct_len;
106
107                 /*
108                  * both struct_v and struct_compat are expected to be >= 1
109                  */
110                 ceph_decode_8_safe(p, end, struct_v, bad);
111                 ceph_decode_8_safe(p, end, struct_compat, bad);
112                 if (!struct_v || !struct_compat)
113                         goto bad;
114                 ceph_decode_32_safe(p, end, struct_len, bad);
115                 ceph_decode_need(p, end, struct_len, bad);
116                 ceph_decode_64_safe(p, end, info->max_bytes, bad);
117                 ceph_decode_64_safe(p, end, info->max_files, bad);
118         } else {
119                 info->max_bytes = 0;
120                 info->max_files = 0;
121         }
122
123         info->pool_ns_len = 0;
124         info->pool_ns_data = NULL;
125         if (features & CEPH_FEATURE_FS_FILE_LAYOUT_V2) {
126                 ceph_decode_32_safe(p, end, info->pool_ns_len, bad);
127                 if (info->pool_ns_len > 0) {
128                         ceph_decode_need(p, end, info->pool_ns_len, bad);
129                         info->pool_ns_data = *p;
130                         *p += info->pool_ns_len;
131                 }
132         }
133
134         return 0;
135 bad:
136         return err;
137 }
138
139 /*
140  * parse a normal reply, which may contain a (dir+)dentry and/or a
141  * target inode.
142  */
143 static int parse_reply_info_trace(void **p, void *end,
144                                   struct ceph_mds_reply_info_parsed *info,
145                                   u64 features)
146 {
147         int err;
148
149         if (info->head->is_dentry) {
150                 err = parse_reply_info_in(p, end, &info->diri, features);
151                 if (err < 0)
152                         goto out_bad;
153
154                 if (unlikely(*p + sizeof(*info->dirfrag) > end))
155                         goto bad;
156                 info->dirfrag = *p;
157                 *p += sizeof(*info->dirfrag) +
158                         sizeof(u32)*le32_to_cpu(info->dirfrag->ndist);
159                 if (unlikely(*p > end))
160                         goto bad;
161
162                 ceph_decode_32_safe(p, end, info->dname_len, bad);
163                 ceph_decode_need(p, end, info->dname_len, bad);
164                 info->dname = *p;
165                 *p += info->dname_len;
166                 info->dlease = *p;
167                 *p += sizeof(*info->dlease);
168         }
169
170         if (info->head->is_target) {
171                 err = parse_reply_info_in(p, end, &info->targeti, features);
172                 if (err < 0)
173                         goto out_bad;
174         }
175
176         if (unlikely(*p != end))
177                 goto bad;
178         return 0;
179
180 bad:
181         err = -EIO;
182 out_bad:
183         pr_err("problem parsing mds trace %d\n", err);
184         return err;
185 }
186
187 /*
188  * parse readdir results
189  */
190 static int parse_reply_info_dir(void **p, void *end,
191                                 struct ceph_mds_reply_info_parsed *info,
192                                 u64 features)
193 {
194         u32 num, i = 0;
195         int err;
196
197         info->dir_dir = *p;
198         if (*p + sizeof(*info->dir_dir) > end)
199                 goto bad;
200         *p += sizeof(*info->dir_dir) +
201                 sizeof(u32)*le32_to_cpu(info->dir_dir->ndist);
202         if (*p > end)
203                 goto bad;
204
205         ceph_decode_need(p, end, sizeof(num) + 2, bad);
206         num = ceph_decode_32(p);
207         {
208                 u16 flags = ceph_decode_16(p);
209                 info->dir_end = !!(flags & CEPH_READDIR_FRAG_END);
210                 info->dir_complete = !!(flags & CEPH_READDIR_FRAG_COMPLETE);
211                 info->hash_order = !!(flags & CEPH_READDIR_HASH_ORDER);
212                 info->offset_hash = !!(flags & CEPH_READDIR_OFFSET_HASH);
213         }
214         if (num == 0)
215                 goto done;
216
217         BUG_ON(!info->dir_entries);
218         if ((unsigned long)(info->dir_entries + num) >
219             (unsigned long)info->dir_entries + info->dir_buf_size) {
220                 pr_err("dir contents are larger than expected\n");
221                 WARN_ON(1);
222                 goto bad;
223         }
224
225         info->dir_nr = num;
226         while (num) {
227                 struct ceph_mds_reply_dir_entry *rde = info->dir_entries + i;
228                 /* dentry */
229                 ceph_decode_need(p, end, sizeof(u32)*2, bad);
230                 rde->name_len = ceph_decode_32(p);
231                 ceph_decode_need(p, end, rde->name_len, bad);
232                 rde->name = *p;
233                 *p += rde->name_len;
234                 dout("parsed dir dname '%.*s'\n", rde->name_len, rde->name);
235                 rde->lease = *p;
236                 *p += sizeof(struct ceph_mds_reply_lease);
237
238                 /* inode */
239                 err = parse_reply_info_in(p, end, &rde->inode, features);
240                 if (err < 0)
241                         goto out_bad;
242                 /* ceph_readdir_prepopulate() will update it */
243                 rde->offset = 0;
244                 i++;
245                 num--;
246         }
247
248 done:
249         if (*p != end)
250                 goto bad;
251         return 0;
252
253 bad:
254         err = -EIO;
255 out_bad:
256         pr_err("problem parsing dir contents %d\n", err);
257         return err;
258 }
259
260 /*
261  * parse fcntl F_GETLK results
262  */
263 static int parse_reply_info_filelock(void **p, void *end,
264                                      struct ceph_mds_reply_info_parsed *info,
265                                      u64 features)
266 {
267         if (*p + sizeof(*info->filelock_reply) > end)
268                 goto bad;
269
270         info->filelock_reply = *p;
271         *p += sizeof(*info->filelock_reply);
272
273         if (unlikely(*p != end))
274                 goto bad;
275         return 0;
276
277 bad:
278         return -EIO;
279 }
280
281 /*
282  * parse create results
283  */
284 static int parse_reply_info_create(void **p, void *end,
285                                   struct ceph_mds_reply_info_parsed *info,
286                                   u64 features)
287 {
288         if (features & CEPH_FEATURE_REPLY_CREATE_INODE) {
289                 if (*p == end) {
290                         info->has_create_ino = false;
291                 } else {
292                         info->has_create_ino = true;
293                         info->ino = ceph_decode_64(p);
294                 }
295         }
296
297         if (unlikely(*p != end))
298                 goto bad;
299         return 0;
300
301 bad:
302         return -EIO;
303 }
304
305 /*
306  * parse extra results
307  */
308 static int parse_reply_info_extra(void **p, void *end,
309                                   struct ceph_mds_reply_info_parsed *info,
310                                   u64 features)
311 {
312         u32 op = le32_to_cpu(info->head->op);
313
314         if (op == CEPH_MDS_OP_GETFILELOCK)
315                 return parse_reply_info_filelock(p, end, info, features);
316         else if (op == CEPH_MDS_OP_READDIR || op == CEPH_MDS_OP_LSSNAP)
317                 return parse_reply_info_dir(p, end, info, features);
318         else if (op == CEPH_MDS_OP_CREATE)
319                 return parse_reply_info_create(p, end, info, features);
320         else
321                 return -EIO;
322 }
323
324 /*
325  * parse entire mds reply
326  */
327 static int parse_reply_info(struct ceph_msg *msg,
328                             struct ceph_mds_reply_info_parsed *info,
329                             u64 features)
330 {
331         void *p, *end;
332         u32 len;
333         int err;
334
335         info->head = msg->front.iov_base;
336         p = msg->front.iov_base + sizeof(struct ceph_mds_reply_head);
337         end = p + msg->front.iov_len - sizeof(struct ceph_mds_reply_head);
338
339         /* trace */
340         ceph_decode_32_safe(&p, end, len, bad);
341         if (len > 0) {
342                 ceph_decode_need(&p, end, len, bad);
343                 err = parse_reply_info_trace(&p, p+len, info, features);
344                 if (err < 0)
345                         goto out_bad;
346         }
347
348         /* extra */
349         ceph_decode_32_safe(&p, end, len, bad);
350         if (len > 0) {
351                 ceph_decode_need(&p, end, len, bad);
352                 err = parse_reply_info_extra(&p, p+len, info, features);
353                 if (err < 0)
354                         goto out_bad;
355         }
356
357         /* snap blob */
358         ceph_decode_32_safe(&p, end, len, bad);
359         info->snapblob_len = len;
360         info->snapblob = p;
361         p += len;
362
363         if (p != end)
364                 goto bad;
365         return 0;
366
367 bad:
368         err = -EIO;
369 out_bad:
370         pr_err("mds parse_reply err %d\n", err);
371         return err;
372 }
373
374 static void destroy_reply_info(struct ceph_mds_reply_info_parsed *info)
375 {
376         if (!info->dir_entries)
377                 return;
378         free_pages((unsigned long)info->dir_entries, get_order(info->dir_buf_size));
379 }
380
381
382 /*
383  * sessions
384  */
385 const char *ceph_session_state_name(int s)
386 {
387         switch (s) {
388         case CEPH_MDS_SESSION_NEW: return "new";
389         case CEPH_MDS_SESSION_OPENING: return "opening";
390         case CEPH_MDS_SESSION_OPEN: return "open";
391         case CEPH_MDS_SESSION_HUNG: return "hung";
392         case CEPH_MDS_SESSION_CLOSING: return "closing";
393         case CEPH_MDS_SESSION_RESTARTING: return "restarting";
394         case CEPH_MDS_SESSION_RECONNECTING: return "reconnecting";
395         case CEPH_MDS_SESSION_REJECTED: return "rejected";
396         default: return "???";
397         }
398 }
399
400 static struct ceph_mds_session *get_session(struct ceph_mds_session *s)
401 {
402         if (refcount_inc_not_zero(&s->s_ref)) {
403                 dout("mdsc get_session %p %d -> %d\n", s,
404                      refcount_read(&s->s_ref)-1, refcount_read(&s->s_ref));
405                 return s;
406         } else {
407                 dout("mdsc get_session %p 0 -- FAIL\n", s);
408                 return NULL;
409         }
410 }
411
412 void ceph_put_mds_session(struct ceph_mds_session *s)
413 {
414         dout("mdsc put_session %p %d -> %d\n", s,
415              refcount_read(&s->s_ref), refcount_read(&s->s_ref)-1);
416         if (refcount_dec_and_test(&s->s_ref)) {
417                 if (s->s_auth.authorizer)
418                         ceph_auth_destroy_authorizer(s->s_auth.authorizer);
419                 kfree(s);
420         }
421 }
422
423 /*
424  * called under mdsc->mutex
425  */
426 struct ceph_mds_session *__ceph_lookup_mds_session(struct ceph_mds_client *mdsc,
427                                                    int mds)
428 {
429         struct ceph_mds_session *session;
430
431         if (mds >= mdsc->max_sessions || !mdsc->sessions[mds])
432                 return NULL;
433         session = mdsc->sessions[mds];
434         dout("lookup_mds_session %p %d\n", session,
435              refcount_read(&session->s_ref));
436         get_session(session);
437         return session;
438 }
439
440 static bool __have_session(struct ceph_mds_client *mdsc, int mds)
441 {
442         if (mds >= mdsc->max_sessions || !mdsc->sessions[mds])
443                 return false;
444         else
445                 return true;
446 }
447
448 static int __verify_registered_session(struct ceph_mds_client *mdsc,
449                                        struct ceph_mds_session *s)
450 {
451         if (s->s_mds >= mdsc->max_sessions ||
452             mdsc->sessions[s->s_mds] != s)
453                 return -ENOENT;
454         return 0;
455 }
456
457 /*
458  * create+register a new session for given mds.
459  * called under mdsc->mutex.
460  */
461 static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
462                                                  int mds)
463 {
464         struct ceph_mds_session *s;
465
466         if (mds >= mdsc->mdsmap->m_num_mds)
467                 return ERR_PTR(-EINVAL);
468
469         s = kzalloc(sizeof(*s), GFP_NOFS);
470         if (!s)
471                 return ERR_PTR(-ENOMEM);
472
473         if (mds >= mdsc->max_sessions) {
474                 int newmax = 1 << get_count_order(mds + 1);
475                 struct ceph_mds_session **sa;
476
477                 dout("%s: realloc to %d\n", __func__, newmax);
478                 sa = kcalloc(newmax, sizeof(void *), GFP_NOFS);
479                 if (!sa)
480                         goto fail_realloc;
481                 if (mdsc->sessions) {
482                         memcpy(sa, mdsc->sessions,
483                                mdsc->max_sessions * sizeof(void *));
484                         kfree(mdsc->sessions);
485                 }
486                 mdsc->sessions = sa;
487                 mdsc->max_sessions = newmax;
488         }
489
490         dout("%s: mds%d\n", __func__, mds);
491         s->s_mdsc = mdsc;
492         s->s_mds = mds;
493         s->s_state = CEPH_MDS_SESSION_NEW;
494         s->s_ttl = 0;
495         s->s_seq = 0;
496         mutex_init(&s->s_mutex);
497
498         ceph_con_init(&s->s_con, s, &mds_con_ops, &mdsc->fsc->client->msgr);
499
500         spin_lock_init(&s->s_gen_ttl_lock);
501         s->s_cap_gen = 0;
502         s->s_cap_ttl = jiffies - 1;
503
504         spin_lock_init(&s->s_cap_lock);
505         s->s_renew_requested = 0;
506         s->s_renew_seq = 0;
507         INIT_LIST_HEAD(&s->s_caps);
508         s->s_nr_caps = 0;
509         s->s_trim_caps = 0;
510         refcount_set(&s->s_ref, 1);
511         INIT_LIST_HEAD(&s->s_waiting);
512         INIT_LIST_HEAD(&s->s_unsafe);
513         s->s_num_cap_releases = 0;
514         s->s_cap_reconnect = 0;
515         s->s_cap_iterator = NULL;
516         INIT_LIST_HEAD(&s->s_cap_releases);
517         INIT_LIST_HEAD(&s->s_cap_flushing);
518
519         mdsc->sessions[mds] = s;
520         atomic_inc(&mdsc->num_sessions);
521         refcount_inc(&s->s_ref);  /* one ref to sessions[], one to caller */
522
523         ceph_con_open(&s->s_con, CEPH_ENTITY_TYPE_MDS, mds,
524                       ceph_mdsmap_get_addr(mdsc->mdsmap, mds));
525
526         return s;
527
528 fail_realloc:
529         kfree(s);
530         return ERR_PTR(-ENOMEM);
531 }
532
533 /*
534  * called under mdsc->mutex
535  */
536 static void __unregister_session(struct ceph_mds_client *mdsc,
537                                struct ceph_mds_session *s)
538 {
539         dout("__unregister_session mds%d %p\n", s->s_mds, s);
540         BUG_ON(mdsc->sessions[s->s_mds] != s);
541         mdsc->sessions[s->s_mds] = NULL;
542         ceph_con_close(&s->s_con);
543         ceph_put_mds_session(s);
544         atomic_dec(&mdsc->num_sessions);
545 }
546
547 /*
548  * drop session refs in request.
549  *
550  * should be last request ref, or hold mdsc->mutex
551  */
552 static void put_request_session(struct ceph_mds_request *req)
553 {
554         if (req->r_session) {
555                 ceph_put_mds_session(req->r_session);
556                 req->r_session = NULL;
557         }
558 }
559
560 void ceph_mdsc_release_request(struct kref *kref)
561 {
562         struct ceph_mds_request *req = container_of(kref,
563                                                     struct ceph_mds_request,
564                                                     r_kref);
565         destroy_reply_info(&req->r_reply_info);
566         if (req->r_request)
567                 ceph_msg_put(req->r_request);
568         if (req->r_reply)
569                 ceph_msg_put(req->r_reply);
570         if (req->r_inode) {
571                 ceph_put_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
572                 iput(req->r_inode);
573         }
574         if (req->r_parent)
575                 ceph_put_cap_refs(ceph_inode(req->r_parent), CEPH_CAP_PIN);
576         iput(req->r_target_inode);
577         if (req->r_dentry)
578                 dput(req->r_dentry);
579         if (req->r_old_dentry)
580                 dput(req->r_old_dentry);
581         if (req->r_old_dentry_dir) {
582                 /*
583                  * track (and drop pins for) r_old_dentry_dir
584                  * separately, since r_old_dentry's d_parent may have
585                  * changed between the dir mutex being dropped and
586                  * this request being freed.
587                  */
588                 ceph_put_cap_refs(ceph_inode(req->r_old_dentry_dir),
589                                   CEPH_CAP_PIN);
590                 iput(req->r_old_dentry_dir);
591         }
592         kfree(req->r_path1);
593         kfree(req->r_path2);
594         if (req->r_pagelist)
595                 ceph_pagelist_release(req->r_pagelist);
596         put_request_session(req);
597         ceph_unreserve_caps(req->r_mdsc, &req->r_caps_reservation);
598         kfree(req);
599 }
600
601 DEFINE_RB_FUNCS(request, struct ceph_mds_request, r_tid, r_node)
602
603 /*
604  * lookup session, bump ref if found.
605  *
606  * called under mdsc->mutex.
607  */
608 static struct ceph_mds_request *
609 lookup_get_request(struct ceph_mds_client *mdsc, u64 tid)
610 {
611         struct ceph_mds_request *req;
612
613         req = lookup_request(&mdsc->request_tree, tid);
614         if (req)
615                 ceph_mdsc_get_request(req);
616
617         return req;
618 }
619
620 /*
621  * Register an in-flight request, and assign a tid.  Link to directory
622  * are modifying (if any).
623  *
624  * Called under mdsc->mutex.
625  */
626 static void __register_request(struct ceph_mds_client *mdsc,
627                                struct ceph_mds_request *req,
628                                struct inode *dir)
629 {
630         int ret = 0;
631
632         req->r_tid = ++mdsc->last_tid;
633         if (req->r_num_caps) {
634                 ret = ceph_reserve_caps(mdsc, &req->r_caps_reservation,
635                                         req->r_num_caps);
636                 if (ret < 0) {
637                         pr_err("__register_request %p "
638                                "failed to reserve caps: %d\n", req, ret);
639                         /* set req->r_err to fail early from __do_request */
640                         req->r_err = ret;
641                         return;
642                 }
643         }
644         dout("__register_request %p tid %lld\n", req, req->r_tid);
645         ceph_mdsc_get_request(req);
646         insert_request(&mdsc->request_tree, req);
647
648         req->r_uid = current_fsuid();
649         req->r_gid = current_fsgid();
650
651         if (mdsc->oldest_tid == 0 && req->r_op != CEPH_MDS_OP_SETFILELOCK)
652                 mdsc->oldest_tid = req->r_tid;
653
654         if (dir) {
655                 ihold(dir);
656                 req->r_unsafe_dir = dir;
657         }
658 }
659
660 static void __unregister_request(struct ceph_mds_client *mdsc,
661                                  struct ceph_mds_request *req)
662 {
663         dout("__unregister_request %p tid %lld\n", req, req->r_tid);
664
665         /* Never leave an unregistered request on an unsafe list! */
666         list_del_init(&req->r_unsafe_item);
667
668         if (req->r_tid == mdsc->oldest_tid) {
669                 struct rb_node *p = rb_next(&req->r_node);
670                 mdsc->oldest_tid = 0;
671                 while (p) {
672                         struct ceph_mds_request *next_req =
673                                 rb_entry(p, struct ceph_mds_request, r_node);
674                         if (next_req->r_op != CEPH_MDS_OP_SETFILELOCK) {
675                                 mdsc->oldest_tid = next_req->r_tid;
676                                 break;
677                         }
678                         p = rb_next(p);
679                 }
680         }
681
682         erase_request(&mdsc->request_tree, req);
683
684         if (req->r_unsafe_dir  &&
685             test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
686                 struct ceph_inode_info *ci = ceph_inode(req->r_unsafe_dir);
687                 spin_lock(&ci->i_unsafe_lock);
688                 list_del_init(&req->r_unsafe_dir_item);
689                 spin_unlock(&ci->i_unsafe_lock);
690         }
691         if (req->r_target_inode &&
692             test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
693                 struct ceph_inode_info *ci = ceph_inode(req->r_target_inode);
694                 spin_lock(&ci->i_unsafe_lock);
695                 list_del_init(&req->r_unsafe_target_item);
696                 spin_unlock(&ci->i_unsafe_lock);
697         }
698
699         if (req->r_unsafe_dir) {
700                 iput(req->r_unsafe_dir);
701                 req->r_unsafe_dir = NULL;
702         }
703
704         complete_all(&req->r_safe_completion);
705
706         ceph_mdsc_put_request(req);
707 }
708
709 /*
710  * Walk back up the dentry tree until we hit a dentry representing a
711  * non-snapshot inode. We do this using the rcu_read_lock (which must be held
712  * when calling this) to ensure that the objects won't disappear while we're
713  * working with them. Once we hit a candidate dentry, we attempt to take a
714  * reference to it, and return that as the result.
715  */
716 static struct inode *get_nonsnap_parent(struct dentry *dentry)
717 {
718         struct inode *inode = NULL;
719
720         while (dentry && !IS_ROOT(dentry)) {
721                 inode = d_inode_rcu(dentry);
722                 if (!inode || ceph_snap(inode) == CEPH_NOSNAP)
723                         break;
724                 dentry = dentry->d_parent;
725         }
726         if (inode)
727                 inode = igrab(inode);
728         return inode;
729 }
730
731 /*
732  * Choose mds to send request to next.  If there is a hint set in the
733  * request (e.g., due to a prior forward hint from the mds), use that.
734  * Otherwise, consult frag tree and/or caps to identify the
735  * appropriate mds.  If all else fails, choose randomly.
736  *
737  * Called under mdsc->mutex.
738  */
739 static int __choose_mds(struct ceph_mds_client *mdsc,
740                         struct ceph_mds_request *req)
741 {
742         struct inode *inode;
743         struct ceph_inode_info *ci;
744         struct ceph_cap *cap;
745         int mode = req->r_direct_mode;
746         int mds = -1;
747         u32 hash = req->r_direct_hash;
748         bool is_hash = test_bit(CEPH_MDS_R_DIRECT_IS_HASH, &req->r_req_flags);
749
750         /*
751          * is there a specific mds we should try?  ignore hint if we have
752          * no session and the mds is not up (active or recovering).
753          */
754         if (req->r_resend_mds >= 0 &&
755             (__have_session(mdsc, req->r_resend_mds) ||
756              ceph_mdsmap_get_state(mdsc->mdsmap, req->r_resend_mds) > 0)) {
757                 dout("choose_mds using resend_mds mds%d\n",
758                      req->r_resend_mds);
759                 return req->r_resend_mds;
760         }
761
762         if (mode == USE_RANDOM_MDS)
763                 goto random;
764
765         inode = NULL;
766         if (req->r_inode) {
767                 if (ceph_snap(req->r_inode) != CEPH_SNAPDIR) {
768                         inode = req->r_inode;
769                         ihold(inode);
770                 } else {
771                         /* req->r_dentry is non-null for LSSNAP request */
772                         rcu_read_lock();
773                         inode = get_nonsnap_parent(req->r_dentry);
774                         rcu_read_unlock();
775                         dout("__choose_mds using snapdir's parent %p\n", inode);
776                 }
777         } else if (req->r_dentry) {
778                 /* ignore race with rename; old or new d_parent is okay */
779                 struct dentry *parent;
780                 struct inode *dir;
781
782                 rcu_read_lock();
783                 parent = req->r_dentry->d_parent;
784                 dir = req->r_parent ? : d_inode_rcu(parent);
785
786                 if (!dir || dir->i_sb != mdsc->fsc->sb) {
787                         /*  not this fs or parent went negative */
788                         inode = d_inode(req->r_dentry);
789                         if (inode)
790                                 ihold(inode);
791                 } else if (ceph_snap(dir) != CEPH_NOSNAP) {
792                         /* direct snapped/virtual snapdir requests
793                          * based on parent dir inode */
794                         inode = get_nonsnap_parent(parent);
795                         dout("__choose_mds using nonsnap parent %p\n", inode);
796                 } else {
797                         /* dentry target */
798                         inode = d_inode(req->r_dentry);
799                         if (!inode || mode == USE_AUTH_MDS) {
800                                 /* dir + name */
801                                 inode = igrab(dir);
802                                 hash = ceph_dentry_hash(dir, req->r_dentry);
803                                 is_hash = true;
804                         } else {
805                                 ihold(inode);
806                         }
807                 }
808                 rcu_read_unlock();
809         }
810
811         dout("__choose_mds %p is_hash=%d (%d) mode %d\n", inode, (int)is_hash,
812              (int)hash, mode);
813         if (!inode)
814                 goto random;
815         ci = ceph_inode(inode);
816
817         if (is_hash && S_ISDIR(inode->i_mode)) {
818                 struct ceph_inode_frag frag;
819                 int found;
820
821                 ceph_choose_frag(ci, hash, &frag, &found);
822                 if (found) {
823                         if (mode == USE_ANY_MDS && frag.ndist > 0) {
824                                 u8 r;
825
826                                 /* choose a random replica */
827                                 get_random_bytes(&r, 1);
828                                 r %= frag.ndist;
829                                 mds = frag.dist[r];
830                                 dout("choose_mds %p %llx.%llx "
831                                      "frag %u mds%d (%d/%d)\n",
832                                      inode, ceph_vinop(inode),
833                                      frag.frag, mds,
834                                      (int)r, frag.ndist);
835                                 if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >=
836                                     CEPH_MDS_STATE_ACTIVE)
837                                         goto out;
838                         }
839
840                         /* since this file/dir wasn't known to be
841                          * replicated, then we want to look for the
842                          * authoritative mds. */
843                         mode = USE_AUTH_MDS;
844                         if (frag.mds >= 0) {
845                                 /* choose auth mds */
846                                 mds = frag.mds;
847                                 dout("choose_mds %p %llx.%llx "
848                                      "frag %u mds%d (auth)\n",
849                                      inode, ceph_vinop(inode), frag.frag, mds);
850                                 if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >=
851                                     CEPH_MDS_STATE_ACTIVE)
852                                         goto out;
853                         }
854                 }
855         }
856
857         spin_lock(&ci->i_ceph_lock);
858         cap = NULL;
859         if (mode == USE_AUTH_MDS)
860                 cap = ci->i_auth_cap;
861         if (!cap && !RB_EMPTY_ROOT(&ci->i_caps))
862                 cap = rb_entry(rb_first(&ci->i_caps), struct ceph_cap, ci_node);
863         if (!cap) {
864                 spin_unlock(&ci->i_ceph_lock);
865                 iput(inode);
866                 goto random;
867         }
868         mds = cap->session->s_mds;
869         dout("choose_mds %p %llx.%llx mds%d (%scap %p)\n",
870              inode, ceph_vinop(inode), mds,
871              cap == ci->i_auth_cap ? "auth " : "", cap);
872         spin_unlock(&ci->i_ceph_lock);
873 out:
874         iput(inode);
875         return mds;
876
877 random:
878         mds = ceph_mdsmap_get_random_mds(mdsc->mdsmap);
879         dout("choose_mds chose random mds%d\n", mds);
880         return mds;
881 }
882
883
884 /*
885  * session messages
886  */
887 static struct ceph_msg *create_session_msg(u32 op, u64 seq)
888 {
889         struct ceph_msg *msg;
890         struct ceph_mds_session_head *h;
891
892         msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h), GFP_NOFS,
893                            false);
894         if (!msg) {
895                 pr_err("create_session_msg ENOMEM creating msg\n");
896                 return NULL;
897         }
898         h = msg->front.iov_base;
899         h->op = cpu_to_le32(op);
900         h->seq = cpu_to_le64(seq);
901
902         return msg;
903 }
904
905 static void encode_supported_features(void **p, void *end)
906 {
907         static const unsigned char bits[] = CEPHFS_FEATURES_CLIENT_SUPPORTED;
908         static const size_t count = ARRAY_SIZE(bits);
909
910         if (count > 0) {
911                 size_t i;
912                 size_t size = ((size_t)bits[count - 1] + 64) / 64 * 8;
913
914                 BUG_ON(*p + 4 + size > end);
915                 ceph_encode_32(p, size);
916                 memset(*p, 0, size);
917                 for (i = 0; i < count; i++)
918                         ((unsigned char*)(*p))[i / 8] |= 1 << (bits[i] % 8);
919                 *p += size;
920         } else {
921                 BUG_ON(*p + 4 > end);
922                 ceph_encode_32(p, 0);
923         }
924 }
925
926 /*
927  * session message, specialization for CEPH_SESSION_REQUEST_OPEN
928  * to include additional client metadata fields.
929  */
930 static struct ceph_msg *create_session_open_msg(struct ceph_mds_client *mdsc, u64 seq)
931 {
932         struct ceph_msg *msg;
933         struct ceph_mds_session_head *h;
934         int i = -1;
935         int extra_bytes = 0;
936         int metadata_key_count = 0;
937         struct ceph_options *opt = mdsc->fsc->client->options;
938         struct ceph_mount_options *fsopt = mdsc->fsc->mount_options;
939         void *p, *end;
940
941         const char* metadata[][2] = {
942                 {"hostname", mdsc->nodename},
943                 {"kernel_version", init_utsname()->release},
944                 {"entity_id", opt->name ? : ""},
945                 {"root", fsopt->server_path ? : "/"},
946                 {NULL, NULL}
947         };
948
949         /* Calculate serialized length of metadata */
950         extra_bytes = 4;  /* map length */
951         for (i = 0; metadata[i][0]; ++i) {
952                 extra_bytes += 8 + strlen(metadata[i][0]) +
953                         strlen(metadata[i][1]);
954                 metadata_key_count++;
955         }
956         /* supported feature */
957         extra_bytes += 4 + 8;
958
959         /* Allocate the message */
960         msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h) + extra_bytes,
961                            GFP_NOFS, false);
962         if (!msg) {
963                 pr_err("create_session_msg ENOMEM creating msg\n");
964                 return NULL;
965         }
966         p = msg->front.iov_base;
967         end = p + msg->front.iov_len;
968
969         h = p;
970         h->op = cpu_to_le32(CEPH_SESSION_REQUEST_OPEN);
971         h->seq = cpu_to_le64(seq);
972
973         /*
974          * Serialize client metadata into waiting buffer space, using
975          * the format that userspace expects for map<string, string>
976          *
977          * ClientSession messages with metadata are v2
978          */
979         msg->hdr.version = cpu_to_le16(3);
980         msg->hdr.compat_version = cpu_to_le16(1);
981
982         /* The write pointer, following the session_head structure */
983         p += sizeof(*h);
984
985         /* Number of entries in the map */
986         ceph_encode_32(&p, metadata_key_count);
987
988         /* Two length-prefixed strings for each entry in the map */
989         for (i = 0; metadata[i][0]; ++i) {
990                 size_t const key_len = strlen(metadata[i][0]);
991                 size_t const val_len = strlen(metadata[i][1]);
992
993                 ceph_encode_32(&p, key_len);
994                 memcpy(p, metadata[i][0], key_len);
995                 p += key_len;
996                 ceph_encode_32(&p, val_len);
997                 memcpy(p, metadata[i][1], val_len);
998                 p += val_len;
999         }
1000
1001         encode_supported_features(&p, end);
1002         msg->front.iov_len = p - msg->front.iov_base;
1003         msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
1004
1005         return msg;
1006 }
1007
1008 /*
1009  * send session open request.
1010  *
1011  * called under mdsc->mutex
1012  */
1013 static int __open_session(struct ceph_mds_client *mdsc,
1014                           struct ceph_mds_session *session)
1015 {
1016         struct ceph_msg *msg;
1017         int mstate;
1018         int mds = session->s_mds;
1019
1020         /* wait for mds to go active? */
1021         mstate = ceph_mdsmap_get_state(mdsc->mdsmap, mds);
1022         dout("open_session to mds%d (%s)\n", mds,
1023              ceph_mds_state_name(mstate));
1024         session->s_state = CEPH_MDS_SESSION_OPENING;
1025         session->s_renew_requested = jiffies;
1026
1027         /* send connect message */
1028         msg = create_session_open_msg(mdsc, session->s_seq);
1029         if (!msg)
1030                 return -ENOMEM;
1031         ceph_con_send(&session->s_con, msg);
1032         return 0;
1033 }
1034
1035 /*
1036  * open sessions for any export targets for the given mds
1037  *
1038  * called under mdsc->mutex
1039  */
1040 static struct ceph_mds_session *
1041 __open_export_target_session(struct ceph_mds_client *mdsc, int target)
1042 {
1043         struct ceph_mds_session *session;
1044
1045         session = __ceph_lookup_mds_session(mdsc, target);
1046         if (!session) {
1047                 session = register_session(mdsc, target);
1048                 if (IS_ERR(session))
1049                         return session;
1050         }
1051         if (session->s_state == CEPH_MDS_SESSION_NEW ||
1052             session->s_state == CEPH_MDS_SESSION_CLOSING)
1053                 __open_session(mdsc, session);
1054
1055         return session;
1056 }
1057
1058 struct ceph_mds_session *
1059 ceph_mdsc_open_export_target_session(struct ceph_mds_client *mdsc, int target)
1060 {
1061         struct ceph_mds_session *session;
1062
1063         dout("open_export_target_session to mds%d\n", target);
1064
1065         mutex_lock(&mdsc->mutex);
1066         session = __open_export_target_session(mdsc, target);
1067         mutex_unlock(&mdsc->mutex);
1068
1069         return session;
1070 }
1071
1072 static void __open_export_target_sessions(struct ceph_mds_client *mdsc,
1073                                           struct ceph_mds_session *session)
1074 {
1075         struct ceph_mds_info *mi;
1076         struct ceph_mds_session *ts;
1077         int i, mds = session->s_mds;
1078
1079         if (mds >= mdsc->mdsmap->m_num_mds)
1080                 return;
1081
1082         mi = &mdsc->mdsmap->m_info[mds];
1083         dout("open_export_target_sessions for mds%d (%d targets)\n",
1084              session->s_mds, mi->num_export_targets);
1085
1086         for (i = 0; i < mi->num_export_targets; i++) {
1087                 ts = __open_export_target_session(mdsc, mi->export_targets[i]);
1088                 if (!IS_ERR(ts))
1089                         ceph_put_mds_session(ts);
1090         }
1091 }
1092
1093 void ceph_mdsc_open_export_target_sessions(struct ceph_mds_client *mdsc,
1094                                            struct ceph_mds_session *session)
1095 {
1096         mutex_lock(&mdsc->mutex);
1097         __open_export_target_sessions(mdsc, session);
1098         mutex_unlock(&mdsc->mutex);
1099 }
1100
1101 /*
1102  * session caps
1103  */
1104
1105 static void detach_cap_releases(struct ceph_mds_session *session,
1106                                 struct list_head *target)
1107 {
1108         lockdep_assert_held(&session->s_cap_lock);
1109
1110         list_splice_init(&session->s_cap_releases, target);
1111         session->s_num_cap_releases = 0;
1112         dout("dispose_cap_releases mds%d\n", session->s_mds);
1113 }
1114
1115 static void dispose_cap_releases(struct ceph_mds_client *mdsc,
1116                                  struct list_head *dispose)
1117 {
1118         while (!list_empty(dispose)) {
1119                 struct ceph_cap *cap;
1120                 /* zero out the in-progress message */
1121                 cap = list_first_entry(dispose, struct ceph_cap, session_caps);
1122                 list_del(&cap->session_caps);
1123                 ceph_put_cap(mdsc, cap);
1124         }
1125 }
1126
1127 static void cleanup_session_requests(struct ceph_mds_client *mdsc,
1128                                      struct ceph_mds_session *session)
1129 {
1130         struct ceph_mds_request *req;
1131         struct rb_node *p;
1132
1133         dout("cleanup_session_requests mds%d\n", session->s_mds);
1134         mutex_lock(&mdsc->mutex);
1135         while (!list_empty(&session->s_unsafe)) {
1136                 req = list_first_entry(&session->s_unsafe,
1137                                        struct ceph_mds_request, r_unsafe_item);
1138                 pr_warn_ratelimited(" dropping unsafe request %llu\n",
1139                                     req->r_tid);
1140                 __unregister_request(mdsc, req);
1141         }
1142         /* zero r_attempts, so kick_requests() will re-send requests */
1143         p = rb_first(&mdsc->request_tree);
1144         while (p) {
1145                 req = rb_entry(p, struct ceph_mds_request, r_node);
1146                 p = rb_next(p);
1147                 if (req->r_session &&
1148                     req->r_session->s_mds == session->s_mds)
1149                         req->r_attempts = 0;
1150         }
1151         mutex_unlock(&mdsc->mutex);
1152 }
1153
1154 /*
1155  * Helper to safely iterate over all caps associated with a session, with
1156  * special care taken to handle a racing __ceph_remove_cap().
1157  *
1158  * Caller must hold session s_mutex.
1159  */
1160 static int iterate_session_caps(struct ceph_mds_session *session,
1161                                  int (*cb)(struct inode *, struct ceph_cap *,
1162                                             void *), void *arg)
1163 {
1164         struct list_head *p;
1165         struct ceph_cap *cap;
1166         struct inode *inode, *last_inode = NULL;
1167         struct ceph_cap *old_cap = NULL;
1168         int ret;
1169
1170         dout("iterate_session_caps %p mds%d\n", session, session->s_mds);
1171         spin_lock(&session->s_cap_lock);
1172         p = session->s_caps.next;
1173         while (p != &session->s_caps) {
1174                 cap = list_entry(p, struct ceph_cap, session_caps);
1175                 inode = igrab(&cap->ci->vfs_inode);
1176                 if (!inode) {
1177                         p = p->next;
1178                         continue;
1179                 }
1180                 session->s_cap_iterator = cap;
1181                 spin_unlock(&session->s_cap_lock);
1182
1183                 if (last_inode) {
1184                         iput(last_inode);
1185                         last_inode = NULL;
1186                 }
1187                 if (old_cap) {
1188                         ceph_put_cap(session->s_mdsc, old_cap);
1189                         old_cap = NULL;
1190                 }
1191
1192                 ret = cb(inode, cap, arg);
1193                 last_inode = inode;
1194
1195                 spin_lock(&session->s_cap_lock);
1196                 p = p->next;
1197                 if (!cap->ci) {
1198                         dout("iterate_session_caps  finishing cap %p removal\n",
1199                              cap);
1200                         BUG_ON(cap->session != session);
1201                         cap->session = NULL;
1202                         list_del_init(&cap->session_caps);
1203                         session->s_nr_caps--;
1204                         if (cap->queue_release) {
1205                                 list_add_tail(&cap->session_caps,
1206                                               &session->s_cap_releases);
1207                                 session->s_num_cap_releases++;
1208                         } else {
1209                                 old_cap = cap;  /* put_cap it w/o locks held */
1210                         }
1211                 }
1212                 if (ret < 0)
1213                         goto out;
1214         }
1215         ret = 0;
1216 out:
1217         session->s_cap_iterator = NULL;
1218         spin_unlock(&session->s_cap_lock);
1219
1220         iput(last_inode);
1221         if (old_cap)
1222                 ceph_put_cap(session->s_mdsc, old_cap);
1223
1224         return ret;
1225 }
1226
1227 static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
1228                                   void *arg)
1229 {
1230         struct ceph_fs_client *fsc = (struct ceph_fs_client *)arg;
1231         struct ceph_inode_info *ci = ceph_inode(inode);
1232         LIST_HEAD(to_remove);
1233         bool drop = false;
1234         bool invalidate = false;
1235
1236         dout("removing cap %p, ci is %p, inode is %p\n",
1237              cap, ci, &ci->vfs_inode);
1238         spin_lock(&ci->i_ceph_lock);
1239         __ceph_remove_cap(cap, false);
1240         if (!ci->i_auth_cap) {
1241                 struct ceph_cap_flush *cf;
1242                 struct ceph_mds_client *mdsc = fsc->mdsc;
1243
1244                 ci->i_ceph_flags |= CEPH_I_CAP_DROPPED;
1245
1246                 if (ci->i_wrbuffer_ref > 0 &&
1247                     READ_ONCE(fsc->mount_state) == CEPH_MOUNT_SHUTDOWN)
1248                         invalidate = true;
1249
1250                 while (!list_empty(&ci->i_cap_flush_list)) {
1251                         cf = list_first_entry(&ci->i_cap_flush_list,
1252                                               struct ceph_cap_flush, i_list);
1253                         list_move(&cf->i_list, &to_remove);
1254                 }
1255
1256                 spin_lock(&mdsc->cap_dirty_lock);
1257
1258                 list_for_each_entry(cf, &to_remove, i_list)
1259                         list_del(&cf->g_list);
1260
1261                 if (!list_empty(&ci->i_dirty_item)) {
1262                         pr_warn_ratelimited(
1263                                 " dropping dirty %s state for %p %lld\n",
1264                                 ceph_cap_string(ci->i_dirty_caps),
1265                                 inode, ceph_ino(inode));
1266                         ci->i_dirty_caps = 0;
1267                         list_del_init(&ci->i_dirty_item);
1268                         drop = true;
1269                 }
1270                 if (!list_empty(&ci->i_flushing_item)) {
1271                         pr_warn_ratelimited(
1272                                 " dropping dirty+flushing %s state for %p %lld\n",
1273                                 ceph_cap_string(ci->i_flushing_caps),
1274                                 inode, ceph_ino(inode));
1275                         ci->i_flushing_caps = 0;
1276                         list_del_init(&ci->i_flushing_item);
1277                         mdsc->num_cap_flushing--;
1278                         drop = true;
1279                 }
1280                 spin_unlock(&mdsc->cap_dirty_lock);
1281
1282                 if (atomic_read(&ci->i_filelock_ref) > 0) {
1283                         /* make further file lock syscall return -EIO */
1284                         ci->i_ceph_flags |= CEPH_I_ERROR_FILELOCK;
1285                         pr_warn_ratelimited(" dropping file locks for %p %lld\n",
1286                                             inode, ceph_ino(inode));
1287                 }
1288
1289                 if (!ci->i_dirty_caps && ci->i_prealloc_cap_flush) {
1290                         list_add(&ci->i_prealloc_cap_flush->i_list, &to_remove);
1291                         ci->i_prealloc_cap_flush = NULL;
1292                 }
1293         }
1294         spin_unlock(&ci->i_ceph_lock);
1295         while (!list_empty(&to_remove)) {
1296                 struct ceph_cap_flush *cf;
1297                 cf = list_first_entry(&to_remove,
1298                                       struct ceph_cap_flush, i_list);
1299                 list_del(&cf->i_list);
1300                 ceph_free_cap_flush(cf);
1301         }
1302
1303         wake_up_all(&ci->i_cap_wq);
1304         if (invalidate)
1305                 ceph_queue_invalidate(inode);
1306         if (drop)
1307                 iput(inode);
1308         return 0;
1309 }
1310
1311 /*
1312  * caller must hold session s_mutex
1313  */
1314 static void remove_session_caps(struct ceph_mds_session *session)
1315 {
1316         struct ceph_fs_client *fsc = session->s_mdsc->fsc;
1317         struct super_block *sb = fsc->sb;
1318         LIST_HEAD(dispose);
1319
1320         dout("remove_session_caps on %p\n", session);
1321         iterate_session_caps(session, remove_session_caps_cb, fsc);
1322
1323         wake_up_all(&fsc->mdsc->cap_flushing_wq);
1324
1325         spin_lock(&session->s_cap_lock);
1326         if (session->s_nr_caps > 0) {
1327                 struct inode *inode;
1328                 struct ceph_cap *cap, *prev = NULL;
1329                 struct ceph_vino vino;
1330                 /*
1331                  * iterate_session_caps() skips inodes that are being
1332                  * deleted, we need to wait until deletions are complete.
1333                  * __wait_on_freeing_inode() is designed for the job,
1334                  * but it is not exported, so use lookup inode function
1335                  * to access it.
1336                  */
1337                 while (!list_empty(&session->s_caps)) {
1338                         cap = list_entry(session->s_caps.next,
1339                                          struct ceph_cap, session_caps);
1340                         if (cap == prev)
1341                                 break;
1342                         prev = cap;
1343                         vino = cap->ci->i_vino;
1344                         spin_unlock(&session->s_cap_lock);
1345
1346                         inode = ceph_find_inode(sb, vino);
1347                         iput(inode);
1348
1349                         spin_lock(&session->s_cap_lock);
1350                 }
1351         }
1352
1353         // drop cap expires and unlock s_cap_lock
1354         detach_cap_releases(session, &dispose);
1355
1356         BUG_ON(session->s_nr_caps > 0);
1357         BUG_ON(!list_empty(&session->s_cap_flushing));
1358         spin_unlock(&session->s_cap_lock);
1359         dispose_cap_releases(session->s_mdsc, &dispose);
1360 }
1361
1362 /*
1363  * wake up any threads waiting on this session's caps.  if the cap is
1364  * old (didn't get renewed on the client reconnect), remove it now.
1365  *
1366  * caller must hold s_mutex.
1367  */
1368 static int wake_up_session_cb(struct inode *inode, struct ceph_cap *cap,
1369                               void *arg)
1370 {
1371         struct ceph_inode_info *ci = ceph_inode(inode);
1372
1373         if (arg) {
1374                 spin_lock(&ci->i_ceph_lock);
1375                 ci->i_wanted_max_size = 0;
1376                 ci->i_requested_max_size = 0;
1377                 spin_unlock(&ci->i_ceph_lock);
1378         }
1379         wake_up_all(&ci->i_cap_wq);
1380         return 0;
1381 }
1382
1383 static void wake_up_session_caps(struct ceph_mds_session *session,
1384                                  int reconnect)
1385 {
1386         dout("wake_up_session_caps %p mds%d\n", session, session->s_mds);
1387         iterate_session_caps(session, wake_up_session_cb,
1388                              (void *)(unsigned long)reconnect);
1389 }
1390
1391 /*
1392  * Send periodic message to MDS renewing all currently held caps.  The
1393  * ack will reset the expiration for all caps from this session.
1394  *
1395  * caller holds s_mutex
1396  */
1397 static int send_renew_caps(struct ceph_mds_client *mdsc,
1398                            struct ceph_mds_session *session)
1399 {
1400         struct ceph_msg *msg;
1401         int state;
1402
1403         if (time_after_eq(jiffies, session->s_cap_ttl) &&
1404             time_after_eq(session->s_cap_ttl, session->s_renew_requested))
1405                 pr_info("mds%d caps stale\n", session->s_mds);
1406         session->s_renew_requested = jiffies;
1407
1408         /* do not try to renew caps until a recovering mds has reconnected
1409          * with its clients. */
1410         state = ceph_mdsmap_get_state(mdsc->mdsmap, session->s_mds);
1411         if (state < CEPH_MDS_STATE_RECONNECT) {
1412                 dout("send_renew_caps ignoring mds%d (%s)\n",
1413                      session->s_mds, ceph_mds_state_name(state));
1414                 return 0;
1415         }
1416
1417         dout("send_renew_caps to mds%d (%s)\n", session->s_mds,
1418                 ceph_mds_state_name(state));
1419         msg = create_session_msg(CEPH_SESSION_REQUEST_RENEWCAPS,
1420                                  ++session->s_renew_seq);
1421         if (!msg)
1422                 return -ENOMEM;
1423         ceph_con_send(&session->s_con, msg);
1424         return 0;
1425 }
1426
1427 static int send_flushmsg_ack(struct ceph_mds_client *mdsc,
1428                              struct ceph_mds_session *session, u64 seq)
1429 {
1430         struct ceph_msg *msg;
1431
1432         dout("send_flushmsg_ack to mds%d (%s)s seq %lld\n",
1433              session->s_mds, ceph_session_state_name(session->s_state), seq);
1434         msg = create_session_msg(CEPH_SESSION_FLUSHMSG_ACK, seq);
1435         if (!msg)
1436                 return -ENOMEM;
1437         ceph_con_send(&session->s_con, msg);
1438         return 0;
1439 }
1440
1441
1442 /*
1443  * Note new cap ttl, and any transition from stale -> not stale (fresh?).
1444  *
1445  * Called under session->s_mutex
1446  */
1447 static void renewed_caps(struct ceph_mds_client *mdsc,
1448                          struct ceph_mds_session *session, int is_renew)
1449 {
1450         int was_stale;
1451         int wake = 0;
1452
1453         spin_lock(&session->s_cap_lock);
1454         was_stale = is_renew && time_after_eq(jiffies, session->s_cap_ttl);
1455
1456         session->s_cap_ttl = session->s_renew_requested +
1457                 mdsc->mdsmap->m_session_timeout*HZ;
1458
1459         if (was_stale) {
1460                 if (time_before(jiffies, session->s_cap_ttl)) {
1461                         pr_info("mds%d caps renewed\n", session->s_mds);
1462                         wake = 1;
1463                 } else {
1464                         pr_info("mds%d caps still stale\n", session->s_mds);
1465                 }
1466         }
1467         dout("renewed_caps mds%d ttl now %lu, was %s, now %s\n",
1468              session->s_mds, session->s_cap_ttl, was_stale ? "stale" : "fresh",
1469              time_before(jiffies, session->s_cap_ttl) ? "stale" : "fresh");
1470         spin_unlock(&session->s_cap_lock);
1471
1472         if (wake)
1473                 wake_up_session_caps(session, 0);
1474 }
1475
1476 /*
1477  * send a session close request
1478  */
1479 static int request_close_session(struct ceph_mds_client *mdsc,
1480                                  struct ceph_mds_session *session)
1481 {
1482         struct ceph_msg *msg;
1483
1484         dout("request_close_session mds%d state %s seq %lld\n",
1485              session->s_mds, ceph_session_state_name(session->s_state),
1486              session->s_seq);
1487         msg = create_session_msg(CEPH_SESSION_REQUEST_CLOSE, session->s_seq);
1488         if (!msg)
1489                 return -ENOMEM;
1490         ceph_con_send(&session->s_con, msg);
1491         return 1;
1492 }
1493
1494 /*
1495  * Called with s_mutex held.
1496  */
1497 static int __close_session(struct ceph_mds_client *mdsc,
1498                          struct ceph_mds_session *session)
1499 {
1500         if (session->s_state >= CEPH_MDS_SESSION_CLOSING)
1501                 return 0;
1502         session->s_state = CEPH_MDS_SESSION_CLOSING;
1503         return request_close_session(mdsc, session);
1504 }
1505
1506 static bool drop_negative_children(struct dentry *dentry)
1507 {
1508         struct dentry *child;
1509         bool all_negative = true;
1510
1511         if (!d_is_dir(dentry))
1512                 goto out;
1513
1514         spin_lock(&dentry->d_lock);
1515         list_for_each_entry(child, &dentry->d_subdirs, d_child) {
1516                 if (d_really_is_positive(child)) {
1517                         all_negative = false;
1518                         break;
1519                 }
1520         }
1521         spin_unlock(&dentry->d_lock);
1522
1523         if (all_negative)
1524                 shrink_dcache_parent(dentry);
1525 out:
1526         return all_negative;
1527 }
1528
1529 /*
1530  * Trim old(er) caps.
1531  *
1532  * Because we can't cache an inode without one or more caps, we do
1533  * this indirectly: if a cap is unused, we prune its aliases, at which
1534  * point the inode will hopefully get dropped to.
1535  *
1536  * Yes, this is a bit sloppy.  Our only real goal here is to respond to
1537  * memory pressure from the MDS, though, so it needn't be perfect.
1538  */
1539 static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
1540 {
1541         struct ceph_mds_session *session = arg;
1542         struct ceph_inode_info *ci = ceph_inode(inode);
1543         int used, wanted, oissued, mine;
1544
1545         if (session->s_trim_caps <= 0)
1546                 return -1;
1547
1548         spin_lock(&ci->i_ceph_lock);
1549         mine = cap->issued | cap->implemented;
1550         used = __ceph_caps_used(ci);
1551         wanted = __ceph_caps_file_wanted(ci);
1552         oissued = __ceph_caps_issued_other(ci, cap);
1553
1554         dout("trim_caps_cb %p cap %p mine %s oissued %s used %s wanted %s\n",
1555              inode, cap, ceph_cap_string(mine), ceph_cap_string(oissued),
1556              ceph_cap_string(used), ceph_cap_string(wanted));
1557         if (cap == ci->i_auth_cap) {
1558                 if (ci->i_dirty_caps || ci->i_flushing_caps ||
1559                     !list_empty(&ci->i_cap_snaps))
1560                         goto out;
1561                 if ((used | wanted) & CEPH_CAP_ANY_WR)
1562                         goto out;
1563                 /* Note: it's possible that i_filelock_ref becomes non-zero
1564                  * after dropping auth caps. It doesn't hurt because reply
1565                  * of lock mds request will re-add auth caps. */
1566                 if (atomic_read(&ci->i_filelock_ref) > 0)
1567                         goto out;
1568         }
1569         /* The inode has cached pages, but it's no longer used.
1570          * we can safely drop it */
1571         if (wanted == 0 && used == CEPH_CAP_FILE_CACHE &&
1572             !(oissued & CEPH_CAP_FILE_CACHE)) {
1573           used = 0;
1574           oissued = 0;
1575         }
1576         if ((used | wanted) & ~oissued & mine)
1577                 goto out;   /* we need these caps */
1578
1579         if (oissued) {
1580                 /* we aren't the only cap.. just remove us */
1581                 __ceph_remove_cap(cap, true);
1582                 session->s_trim_caps--;
1583         } else {
1584                 struct dentry *dentry;
1585                 /* try dropping referring dentries */
1586                 spin_unlock(&ci->i_ceph_lock);
1587                 dentry = d_find_any_alias(inode);
1588                 if (dentry && drop_negative_children(dentry)) {
1589                         int count;
1590                         dput(dentry);
1591                         d_prune_aliases(inode);
1592                         count = atomic_read(&inode->i_count);
1593                         if (count == 1)
1594                                 session->s_trim_caps--;
1595                         dout("trim_caps_cb %p cap %p pruned, count now %d\n",
1596                              inode, cap, count);
1597                 } else {
1598                         dput(dentry);
1599                 }
1600                 return 0;
1601         }
1602
1603 out:
1604         spin_unlock(&ci->i_ceph_lock);
1605         return 0;
1606 }
1607
1608 /*
1609  * Trim session cap count down to some max number.
1610  */
1611 int ceph_trim_caps(struct ceph_mds_client *mdsc,
1612                    struct ceph_mds_session *session,
1613                    int max_caps)
1614 {
1615         int trim_caps = session->s_nr_caps - max_caps;
1616
1617         dout("trim_caps mds%d start: %d / %d, trim %d\n",
1618              session->s_mds, session->s_nr_caps, max_caps, trim_caps);
1619         if (trim_caps > 0) {
1620                 session->s_trim_caps = trim_caps;
1621                 iterate_session_caps(session, trim_caps_cb, session);
1622                 dout("trim_caps mds%d done: %d / %d, trimmed %d\n",
1623                      session->s_mds, session->s_nr_caps, max_caps,
1624                         trim_caps - session->s_trim_caps);
1625                 session->s_trim_caps = 0;
1626         }
1627
1628         ceph_send_cap_releases(mdsc, session);
1629         return 0;
1630 }
1631
1632 static int check_caps_flush(struct ceph_mds_client *mdsc,
1633                             u64 want_flush_tid)
1634 {
1635         int ret = 1;
1636
1637         spin_lock(&mdsc->cap_dirty_lock);
1638         if (!list_empty(&mdsc->cap_flush_list)) {
1639                 struct ceph_cap_flush *cf =
1640                         list_first_entry(&mdsc->cap_flush_list,
1641                                          struct ceph_cap_flush, g_list);
1642                 if (cf->tid <= want_flush_tid) {
1643                         dout("check_caps_flush still flushing tid "
1644                              "%llu <= %llu\n", cf->tid, want_flush_tid);
1645                         ret = 0;
1646                 }
1647         }
1648         spin_unlock(&mdsc->cap_dirty_lock);
1649         return ret;
1650 }
1651
1652 /*
1653  * flush all dirty inode data to disk.
1654  *
1655  * returns true if we've flushed through want_flush_tid
1656  */
1657 static void wait_caps_flush(struct ceph_mds_client *mdsc,
1658                             u64 want_flush_tid)
1659 {
1660         dout("check_caps_flush want %llu\n", want_flush_tid);
1661
1662         wait_event(mdsc->cap_flushing_wq,
1663                    check_caps_flush(mdsc, want_flush_tid));
1664
1665         dout("check_caps_flush ok, flushed thru %llu\n", want_flush_tid);
1666 }
1667
1668 /*
1669  * called under s_mutex
1670  */
1671 void ceph_send_cap_releases(struct ceph_mds_client *mdsc,
1672                             struct ceph_mds_session *session)
1673 {
1674         struct ceph_msg *msg = NULL;
1675         struct ceph_mds_cap_release *head;
1676         struct ceph_mds_cap_item *item;
1677         struct ceph_osd_client *osdc = &mdsc->fsc->client->osdc;
1678         struct ceph_cap *cap;
1679         LIST_HEAD(tmp_list);
1680         int num_cap_releases;
1681         __le32  barrier, *cap_barrier;
1682
1683         down_read(&osdc->lock);
1684         barrier = cpu_to_le32(osdc->epoch_barrier);
1685         up_read(&osdc->lock);
1686
1687         spin_lock(&session->s_cap_lock);
1688 again:
1689         list_splice_init(&session->s_cap_releases, &tmp_list);
1690         num_cap_releases = session->s_num_cap_releases;
1691         session->s_num_cap_releases = 0;
1692         spin_unlock(&session->s_cap_lock);
1693
1694         while (!list_empty(&tmp_list)) {
1695                 if (!msg) {
1696                         msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPRELEASE,
1697                                         PAGE_SIZE, GFP_NOFS, false);
1698                         if (!msg)
1699                                 goto out_err;
1700                         head = msg->front.iov_base;
1701                         head->num = cpu_to_le32(0);
1702                         msg->front.iov_len = sizeof(*head);
1703
1704                         msg->hdr.version = cpu_to_le16(2);
1705                         msg->hdr.compat_version = cpu_to_le16(1);
1706                 }
1707
1708                 cap = list_first_entry(&tmp_list, struct ceph_cap,
1709                                         session_caps);
1710                 list_del(&cap->session_caps);
1711                 num_cap_releases--;
1712
1713                 head = msg->front.iov_base;
1714                 le32_add_cpu(&head->num, 1);
1715                 item = msg->front.iov_base + msg->front.iov_len;
1716                 item->ino = cpu_to_le64(cap->cap_ino);
1717                 item->cap_id = cpu_to_le64(cap->cap_id);
1718                 item->migrate_seq = cpu_to_le32(cap->mseq);
1719                 item->seq = cpu_to_le32(cap->issue_seq);
1720                 msg->front.iov_len += sizeof(*item);
1721
1722                 ceph_put_cap(mdsc, cap);
1723
1724                 if (le32_to_cpu(head->num) == CEPH_CAPS_PER_RELEASE) {
1725                         // Append cap_barrier field
1726                         cap_barrier = msg->front.iov_base + msg->front.iov_len;
1727                         *cap_barrier = barrier;
1728                         msg->front.iov_len += sizeof(*cap_barrier);
1729
1730                         msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
1731                         dout("send_cap_releases mds%d %p\n", session->s_mds, msg);
1732                         ceph_con_send(&session->s_con, msg);
1733                         msg = NULL;
1734                 }
1735         }
1736
1737         BUG_ON(num_cap_releases != 0);
1738
1739         spin_lock(&session->s_cap_lock);
1740         if (!list_empty(&session->s_cap_releases))
1741                 goto again;
1742         spin_unlock(&session->s_cap_lock);
1743
1744         if (msg) {
1745                 // Append cap_barrier field
1746                 cap_barrier = msg->front.iov_base + msg->front.iov_len;
1747                 *cap_barrier = barrier;
1748                 msg->front.iov_len += sizeof(*cap_barrier);
1749
1750                 msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
1751                 dout("send_cap_releases mds%d %p\n", session->s_mds, msg);
1752                 ceph_con_send(&session->s_con, msg);
1753         }
1754         return;
1755 out_err:
1756         pr_err("send_cap_releases mds%d, failed to allocate message\n",
1757                 session->s_mds);
1758         spin_lock(&session->s_cap_lock);
1759         list_splice(&tmp_list, &session->s_cap_releases);
1760         session->s_num_cap_releases += num_cap_releases;
1761         spin_unlock(&session->s_cap_lock);
1762 }
1763
1764 /*
1765  * requests
1766  */
1767
1768 int ceph_alloc_readdir_reply_buffer(struct ceph_mds_request *req,
1769                                     struct inode *dir)
1770 {
1771         struct ceph_inode_info *ci = ceph_inode(dir);
1772         struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
1773         struct ceph_mount_options *opt = req->r_mdsc->fsc->mount_options;
1774         size_t size = sizeof(struct ceph_mds_reply_dir_entry);
1775         int order, num_entries;
1776
1777         spin_lock(&ci->i_ceph_lock);
1778         num_entries = ci->i_files + ci->i_subdirs;
1779         spin_unlock(&ci->i_ceph_lock);
1780         num_entries = max(num_entries, 1);
1781         num_entries = min(num_entries, opt->max_readdir);
1782
1783         order = get_order(size * num_entries);
1784         while (order >= 0) {
1785                 rinfo->dir_entries = (void*)__get_free_pages(GFP_KERNEL |
1786                                                              __GFP_NOWARN,
1787                                                              order);
1788                 if (rinfo->dir_entries)
1789                         break;
1790                 order--;
1791         }
1792         if (!rinfo->dir_entries)
1793                 return -ENOMEM;
1794
1795         num_entries = (PAGE_SIZE << order) / size;
1796         num_entries = min(num_entries, opt->max_readdir);
1797
1798         rinfo->dir_buf_size = PAGE_SIZE << order;
1799         req->r_num_caps = num_entries + 1;
1800         req->r_args.readdir.max_entries = cpu_to_le32(num_entries);
1801         req->r_args.readdir.max_bytes = cpu_to_le32(opt->max_readdir_bytes);
1802         return 0;
1803 }
1804
1805 /*
1806  * Create an mds request.
1807  */
1808 struct ceph_mds_request *
1809 ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode)
1810 {
1811         struct ceph_mds_request *req = kzalloc(sizeof(*req), GFP_NOFS);
1812         struct timespec64 ts;
1813
1814         if (!req)
1815                 return ERR_PTR(-ENOMEM);
1816
1817         mutex_init(&req->r_fill_mutex);
1818         req->r_mdsc = mdsc;
1819         req->r_started = jiffies;
1820         req->r_resend_mds = -1;
1821         INIT_LIST_HEAD(&req->r_unsafe_dir_item);
1822         INIT_LIST_HEAD(&req->r_unsafe_target_item);
1823         req->r_fmode = -1;
1824         kref_init(&req->r_kref);
1825         RB_CLEAR_NODE(&req->r_node);
1826         INIT_LIST_HEAD(&req->r_wait);
1827         init_completion(&req->r_completion);
1828         init_completion(&req->r_safe_completion);
1829         INIT_LIST_HEAD(&req->r_unsafe_item);
1830
1831         ktime_get_coarse_real_ts64(&ts);
1832         req->r_stamp = timespec64_trunc(ts, mdsc->fsc->sb->s_time_gran);
1833
1834         req->r_op = op;
1835         req->r_direct_mode = mode;
1836         return req;
1837 }
1838
1839 /*
1840  * return oldest (lowest) request, tid in request tree, 0 if none.
1841  *
1842  * called under mdsc->mutex.
1843  */
1844 static struct ceph_mds_request *__get_oldest_req(struct ceph_mds_client *mdsc)
1845 {
1846         if (RB_EMPTY_ROOT(&mdsc->request_tree))
1847                 return NULL;
1848         return rb_entry(rb_first(&mdsc->request_tree),
1849                         struct ceph_mds_request, r_node);
1850 }
1851
1852 static inline  u64 __get_oldest_tid(struct ceph_mds_client *mdsc)
1853 {
1854         return mdsc->oldest_tid;
1855 }
1856
1857 /*
1858  * Build a dentry's path.  Allocate on heap; caller must kfree.  Based
1859  * on build_path_from_dentry in fs/cifs/dir.c.
1860  *
1861  * If @stop_on_nosnap, generate path relative to the first non-snapped
1862  * inode.
1863  *
1864  * Encode hidden .snap dirs as a double /, i.e.
1865  *   foo/.snap/bar -> foo//bar
1866  */
1867 char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *base,
1868                            int stop_on_nosnap)
1869 {
1870         struct dentry *temp;
1871         char *path;
1872         int len, pos;
1873         unsigned seq;
1874
1875         if (!dentry)
1876                 return ERR_PTR(-EINVAL);
1877
1878 retry:
1879         len = 0;
1880         seq = read_seqbegin(&rename_lock);
1881         rcu_read_lock();
1882         for (temp = dentry; !IS_ROOT(temp);) {
1883                 struct inode *inode = d_inode(temp);
1884                 if (inode && ceph_snap(inode) == CEPH_SNAPDIR)
1885                         len++;  /* slash only */
1886                 else if (stop_on_nosnap && inode &&
1887                          ceph_snap(inode) == CEPH_NOSNAP)
1888                         break;
1889                 else
1890                         len += 1 + temp->d_name.len;
1891                 temp = temp->d_parent;
1892         }
1893         rcu_read_unlock();
1894         if (len)
1895                 len--;  /* no leading '/' */
1896
1897         path = kmalloc(len+1, GFP_NOFS);
1898         if (!path)
1899                 return ERR_PTR(-ENOMEM);
1900         pos = len;
1901         path[pos] = 0;  /* trailing null */
1902         rcu_read_lock();
1903         for (temp = dentry; !IS_ROOT(temp) && pos != 0; ) {
1904                 struct inode *inode;
1905
1906                 spin_lock(&temp->d_lock);
1907                 inode = d_inode(temp);
1908                 if (inode && ceph_snap(inode) == CEPH_SNAPDIR) {
1909                         dout("build_path path+%d: %p SNAPDIR\n",
1910                              pos, temp);
1911                 } else if (stop_on_nosnap && inode &&
1912                            ceph_snap(inode) == CEPH_NOSNAP) {
1913                         spin_unlock(&temp->d_lock);
1914                         break;
1915                 } else {
1916                         pos -= temp->d_name.len;
1917                         if (pos < 0) {
1918                                 spin_unlock(&temp->d_lock);
1919                                 break;
1920                         }
1921                         strncpy(path + pos, temp->d_name.name,
1922                                 temp->d_name.len);
1923                 }
1924                 spin_unlock(&temp->d_lock);
1925                 if (pos)
1926                         path[--pos] = '/';
1927                 temp = temp->d_parent;
1928         }
1929         rcu_read_unlock();
1930         if (pos != 0 || read_seqretry(&rename_lock, seq)) {
1931                 pr_err("build_path did not end path lookup where "
1932                        "expected, namelen is %d, pos is %d\n", len, pos);
1933                 /* presumably this is only possible if racing with a
1934                    rename of one of the parent directories (we can not
1935                    lock the dentries above us to prevent this, but
1936                    retrying should be harmless) */
1937                 kfree(path);
1938                 goto retry;
1939         }
1940
1941         *base = ceph_ino(d_inode(temp));
1942         *plen = len;
1943         dout("build_path on %p %d built %llx '%.*s'\n",
1944              dentry, d_count(dentry), *base, len, path);
1945         return path;
1946 }
1947
1948 static int build_dentry_path(struct dentry *dentry, struct inode *dir,
1949                              const char **ppath, int *ppathlen, u64 *pino,
1950                              int *pfreepath)
1951 {
1952         char *path;
1953
1954         rcu_read_lock();
1955         if (!dir)
1956                 dir = d_inode_rcu(dentry->d_parent);
1957         if (dir && ceph_snap(dir) == CEPH_NOSNAP) {
1958                 *pino = ceph_ino(dir);
1959                 rcu_read_unlock();
1960                 *ppath = dentry->d_name.name;
1961                 *ppathlen = dentry->d_name.len;
1962                 return 0;
1963         }
1964         rcu_read_unlock();
1965         path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
1966         if (IS_ERR(path))
1967                 return PTR_ERR(path);
1968         *ppath = path;
1969         *pfreepath = 1;
1970         return 0;
1971 }
1972
1973 static int build_inode_path(struct inode *inode,
1974                             const char **ppath, int *ppathlen, u64 *pino,
1975                             int *pfreepath)
1976 {
1977         struct dentry *dentry;
1978         char *path;
1979
1980         if (ceph_snap(inode) == CEPH_NOSNAP) {
1981                 *pino = ceph_ino(inode);
1982                 *ppathlen = 0;
1983                 return 0;
1984         }
1985         dentry = d_find_alias(inode);
1986         path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
1987         dput(dentry);
1988         if (IS_ERR(path))
1989                 return PTR_ERR(path);
1990         *ppath = path;
1991         *pfreepath = 1;
1992         return 0;
1993 }
1994
1995 /*
1996  * request arguments may be specified via an inode *, a dentry *, or
1997  * an explicit ino+path.
1998  */
1999 static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry,
2000                                   struct inode *rdiri, const char *rpath,
2001                                   u64 rino, const char **ppath, int *pathlen,
2002                                   u64 *ino, int *freepath)
2003 {
2004         int r = 0;
2005
2006         if (rinode) {
2007                 r = build_inode_path(rinode, ppath, pathlen, ino, freepath);
2008                 dout(" inode %p %llx.%llx\n", rinode, ceph_ino(rinode),
2009                      ceph_snap(rinode));
2010         } else if (rdentry) {
2011                 r = build_dentry_path(rdentry, rdiri, ppath, pathlen, ino,
2012                                         freepath);
2013                 dout(" dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen,
2014                      *ppath);
2015         } else if (rpath || rino) {
2016                 *ino = rino;
2017                 *ppath = rpath;
2018                 *pathlen = rpath ? strlen(rpath) : 0;
2019                 dout(" path %.*s\n", *pathlen, rpath);
2020         }
2021
2022         return r;
2023 }
2024
2025 /*
2026  * called under mdsc->mutex
2027  */
2028 static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
2029                                                struct ceph_mds_request *req,
2030                                                int mds, bool drop_cap_releases)
2031 {
2032         struct ceph_msg *msg;
2033         struct ceph_mds_request_head *head;
2034         const char *path1 = NULL;
2035         const char *path2 = NULL;
2036         u64 ino1 = 0, ino2 = 0;
2037         int pathlen1 = 0, pathlen2 = 0;
2038         int freepath1 = 0, freepath2 = 0;
2039         int len;
2040         u16 releases;
2041         void *p, *end;
2042         int ret;
2043
2044         ret = set_request_path_attr(req->r_inode, req->r_dentry,
2045                               req->r_parent, req->r_path1, req->r_ino1.ino,
2046                               &path1, &pathlen1, &ino1, &freepath1);
2047         if (ret < 0) {
2048                 msg = ERR_PTR(ret);
2049                 goto out;
2050         }
2051
2052         ret = set_request_path_attr(NULL, req->r_old_dentry,
2053                               req->r_old_dentry_dir,
2054                               req->r_path2, req->r_ino2.ino,
2055                               &path2, &pathlen2, &ino2, &freepath2);
2056         if (ret < 0) {
2057                 msg = ERR_PTR(ret);
2058                 goto out_free1;
2059         }
2060
2061         len = sizeof(*head) +
2062                 pathlen1 + pathlen2 + 2*(1 + sizeof(u32) + sizeof(u64)) +
2063                 sizeof(struct ceph_timespec);
2064
2065         /* calculate (max) length for cap releases */
2066         len += sizeof(struct ceph_mds_request_release) *
2067                 (!!req->r_inode_drop + !!req->r_dentry_drop +
2068                  !!req->r_old_inode_drop + !!req->r_old_dentry_drop);
2069         if (req->r_dentry_drop)
2070                 len += req->r_dentry->d_name.len;
2071         if (req->r_old_dentry_drop)
2072                 len += req->r_old_dentry->d_name.len;
2073
2074         msg = ceph_msg_new(CEPH_MSG_CLIENT_REQUEST, len, GFP_NOFS, false);
2075         if (!msg) {
2076                 msg = ERR_PTR(-ENOMEM);
2077                 goto out_free2;
2078         }
2079
2080         msg->hdr.version = cpu_to_le16(2);
2081         msg->hdr.tid = cpu_to_le64(req->r_tid);
2082
2083         head = msg->front.iov_base;
2084         p = msg->front.iov_base + sizeof(*head);
2085         end = msg->front.iov_base + msg->front.iov_len;
2086
2087         head->mdsmap_epoch = cpu_to_le32(mdsc->mdsmap->m_epoch);
2088         head->op = cpu_to_le32(req->r_op);
2089         head->caller_uid = cpu_to_le32(from_kuid(&init_user_ns, req->r_uid));
2090         head->caller_gid = cpu_to_le32(from_kgid(&init_user_ns, req->r_gid));
2091         head->args = req->r_args;
2092
2093         ceph_encode_filepath(&p, end, ino1, path1);
2094         ceph_encode_filepath(&p, end, ino2, path2);
2095
2096         /* make note of release offset, in case we need to replay */
2097         req->r_request_release_offset = p - msg->front.iov_base;
2098
2099         /* cap releases */
2100         releases = 0;
2101         if (req->r_inode_drop)
2102                 releases += ceph_encode_inode_release(&p,
2103                       req->r_inode ? req->r_inode : d_inode(req->r_dentry),
2104                       mds, req->r_inode_drop, req->r_inode_unless, 0);
2105         if (req->r_dentry_drop)
2106                 releases += ceph_encode_dentry_release(&p, req->r_dentry,
2107                                 req->r_parent, mds, req->r_dentry_drop,
2108                                 req->r_dentry_unless);
2109         if (req->r_old_dentry_drop)
2110                 releases += ceph_encode_dentry_release(&p, req->r_old_dentry,
2111                                 req->r_old_dentry_dir, mds,
2112                                 req->r_old_dentry_drop,
2113                                 req->r_old_dentry_unless);
2114         if (req->r_old_inode_drop)
2115                 releases += ceph_encode_inode_release(&p,
2116                       d_inode(req->r_old_dentry),
2117                       mds, req->r_old_inode_drop, req->r_old_inode_unless, 0);
2118
2119         if (drop_cap_releases) {
2120                 releases = 0;
2121                 p = msg->front.iov_base + req->r_request_release_offset;
2122         }
2123
2124         head->num_releases = cpu_to_le16(releases);
2125
2126         /* time stamp */
2127         {
2128                 struct ceph_timespec ts;
2129                 ceph_encode_timespec64(&ts, &req->r_stamp);
2130                 ceph_encode_copy(&p, &ts, sizeof(ts));
2131         }
2132
2133         BUG_ON(p > end);
2134         msg->front.iov_len = p - msg->front.iov_base;
2135         msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
2136
2137         if (req->r_pagelist) {
2138                 struct ceph_pagelist *pagelist = req->r_pagelist;
2139                 refcount_inc(&pagelist->refcnt);
2140                 ceph_msg_data_add_pagelist(msg, pagelist);
2141                 msg->hdr.data_len = cpu_to_le32(pagelist->length);
2142         } else {
2143                 msg->hdr.data_len = 0;
2144         }
2145
2146         msg->hdr.data_off = cpu_to_le16(0);
2147
2148 out_free2:
2149         if (freepath2)
2150                 kfree((char *)path2);
2151 out_free1:
2152         if (freepath1)
2153                 kfree((char *)path1);
2154 out:
2155         return msg;
2156 }
2157
2158 /*
2159  * called under mdsc->mutex if error, under no mutex if
2160  * success.
2161  */
2162 static void complete_request(struct ceph_mds_client *mdsc,
2163                              struct ceph_mds_request *req)
2164 {
2165         if (req->r_callback)
2166                 req->r_callback(mdsc, req);
2167         else
2168                 complete_all(&req->r_completion);
2169 }
2170
2171 /*
2172  * called under mdsc->mutex
2173  */
2174 static int __prepare_send_request(struct ceph_mds_client *mdsc,
2175                                   struct ceph_mds_request *req,
2176                                   int mds, bool drop_cap_releases)
2177 {
2178         struct ceph_mds_request_head *rhead;
2179         struct ceph_msg *msg;
2180         int flags = 0;
2181
2182         req->r_attempts++;
2183         if (req->r_inode) {
2184                 struct ceph_cap *cap =
2185                         ceph_get_cap_for_mds(ceph_inode(req->r_inode), mds);
2186
2187                 if (cap)
2188                         req->r_sent_on_mseq = cap->mseq;
2189                 else
2190                         req->r_sent_on_mseq = -1;
2191         }
2192         dout("prepare_send_request %p tid %lld %s (attempt %d)\n", req,
2193              req->r_tid, ceph_mds_op_name(req->r_op), req->r_attempts);
2194
2195         if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
2196                 void *p;
2197                 /*
2198                  * Replay.  Do not regenerate message (and rebuild
2199                  * paths, etc.); just use the original message.
2200                  * Rebuilding paths will break for renames because
2201                  * d_move mangles the src name.
2202                  */
2203                 msg = req->r_request;
2204                 rhead = msg->front.iov_base;
2205
2206                 flags = le32_to_cpu(rhead->flags);
2207                 flags |= CEPH_MDS_FLAG_REPLAY;
2208                 rhead->flags = cpu_to_le32(flags);
2209
2210                 if (req->r_target_inode)
2211                         rhead->ino = cpu_to_le64(ceph_ino(req->r_target_inode));
2212
2213                 rhead->num_retry = req->r_attempts - 1;
2214
2215                 /* remove cap/dentry releases from message */
2216                 rhead->num_releases = 0;
2217
2218                 /* time stamp */
2219                 p = msg->front.iov_base + req->r_request_release_offset;
2220                 {
2221                         struct ceph_timespec ts;
2222                         ceph_encode_timespec64(&ts, &req->r_stamp);
2223                         ceph_encode_copy(&p, &ts, sizeof(ts));
2224                 }
2225
2226                 msg->front.iov_len = p - msg->front.iov_base;
2227                 msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
2228                 return 0;
2229         }
2230
2231         if (req->r_request) {
2232                 ceph_msg_put(req->r_request);
2233                 req->r_request = NULL;
2234         }
2235         msg = create_request_message(mdsc, req, mds, drop_cap_releases);
2236         if (IS_ERR(msg)) {
2237                 req->r_err = PTR_ERR(msg);
2238                 return PTR_ERR(msg);
2239         }
2240         req->r_request = msg;
2241
2242         rhead = msg->front.iov_base;
2243         rhead->oldest_client_tid = cpu_to_le64(__get_oldest_tid(mdsc));
2244         if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags))
2245                 flags |= CEPH_MDS_FLAG_REPLAY;
2246         if (req->r_parent)
2247                 flags |= CEPH_MDS_FLAG_WANT_DENTRY;
2248         rhead->flags = cpu_to_le32(flags);
2249         rhead->num_fwd = req->r_num_fwd;
2250         rhead->num_retry = req->r_attempts - 1;
2251         rhead->ino = 0;
2252
2253         dout(" r_parent = %p\n", req->r_parent);
2254         return 0;
2255 }
2256
2257 /*
2258  * send request, or put it on the appropriate wait list.
2259  */
2260 static void __do_request(struct ceph_mds_client *mdsc,
2261                         struct ceph_mds_request *req)
2262 {
2263         struct ceph_mds_session *session = NULL;
2264         int mds = -1;
2265         int err = 0;
2266
2267         if (req->r_err || test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags)) {
2268                 if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags))
2269                         __unregister_request(mdsc, req);
2270                 return;
2271         }
2272
2273         if (req->r_timeout &&
2274             time_after_eq(jiffies, req->r_started + req->r_timeout)) {
2275                 dout("do_request timed out\n");
2276                 err = -EIO;
2277                 goto finish;
2278         }
2279         if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) {
2280                 dout("do_request forced umount\n");
2281                 err = -EIO;
2282                 goto finish;
2283         }
2284         if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_MOUNTING) {
2285                 if (mdsc->mdsmap_err) {
2286                         err = mdsc->mdsmap_err;
2287                         dout("do_request mdsmap err %d\n", err);
2288                         goto finish;
2289                 }
2290                 if (mdsc->mdsmap->m_epoch == 0) {
2291                         dout("do_request no mdsmap, waiting for map\n");
2292                         list_add(&req->r_wait, &mdsc->waiting_for_map);
2293                         return;
2294                 }
2295                 if (!(mdsc->fsc->mount_options->flags &
2296                       CEPH_MOUNT_OPT_MOUNTWAIT) &&
2297                     !ceph_mdsmap_is_cluster_available(mdsc->mdsmap)) {
2298                         err = -ENOENT;
2299                         pr_info("probably no mds server is up\n");
2300                         goto finish;
2301                 }
2302         }
2303
2304         put_request_session(req);
2305
2306         mds = __choose_mds(mdsc, req);
2307         if (mds < 0 ||
2308             ceph_mdsmap_get_state(mdsc->mdsmap, mds) < CEPH_MDS_STATE_ACTIVE) {
2309                 dout("do_request no mds or not active, waiting for map\n");
2310                 list_add(&req->r_wait, &mdsc->waiting_for_map);
2311                 return;
2312         }
2313
2314         /* get, open session */
2315         session = __ceph_lookup_mds_session(mdsc, mds);
2316         if (!session) {
2317                 session = register_session(mdsc, mds);
2318                 if (IS_ERR(session)) {
2319                         err = PTR_ERR(session);
2320                         goto finish;
2321                 }
2322         }
2323         req->r_session = get_session(session);
2324
2325         dout("do_request mds%d session %p state %s\n", mds, session,
2326              ceph_session_state_name(session->s_state));
2327         if (session->s_state != CEPH_MDS_SESSION_OPEN &&
2328             session->s_state != CEPH_MDS_SESSION_HUNG) {
2329                 if (session->s_state == CEPH_MDS_SESSION_REJECTED) {
2330                         err = -EACCES;
2331                         goto out_session;
2332                 }
2333                 if (session->s_state == CEPH_MDS_SESSION_NEW ||
2334                     session->s_state == CEPH_MDS_SESSION_CLOSING)
2335                         __open_session(mdsc, session);
2336                 list_add(&req->r_wait, &session->s_waiting);
2337                 goto out_session;
2338         }
2339
2340         /* send request */
2341         req->r_resend_mds = -1;   /* forget any previous mds hint */
2342
2343         if (req->r_request_started == 0)   /* note request start time */
2344                 req->r_request_started = jiffies;
2345
2346         err = __prepare_send_request(mdsc, req, mds, false);
2347         if (!err) {
2348                 ceph_msg_get(req->r_request);
2349                 ceph_con_send(&session->s_con, req->r_request);
2350         }
2351
2352 out_session:
2353         ceph_put_mds_session(session);
2354 finish:
2355         if (err) {
2356                 dout("__do_request early error %d\n", err);
2357                 req->r_err = err;
2358                 complete_request(mdsc, req);
2359                 __unregister_request(mdsc, req);
2360         }
2361         return;
2362 }
2363
2364 /*
2365  * called under mdsc->mutex
2366  */
2367 static void __wake_requests(struct ceph_mds_client *mdsc,
2368                             struct list_head *head)
2369 {
2370         struct ceph_mds_request *req;
2371         LIST_HEAD(tmp_list);
2372
2373         list_splice_init(head, &tmp_list);
2374
2375         while (!list_empty(&tmp_list)) {
2376                 req = list_entry(tmp_list.next,
2377                                  struct ceph_mds_request, r_wait);
2378                 list_del_init(&req->r_wait);
2379                 dout(" wake request %p tid %llu\n", req, req->r_tid);
2380                 __do_request(mdsc, req);
2381         }
2382 }
2383
2384 /*
2385  * Wake up threads with requests pending for @mds, so that they can
2386  * resubmit their requests to a possibly different mds.
2387  */
2388 static void kick_requests(struct ceph_mds_client *mdsc, int mds)
2389 {
2390         struct ceph_mds_request *req;
2391         struct rb_node *p = rb_first(&mdsc->request_tree);
2392
2393         dout("kick_requests mds%d\n", mds);
2394         while (p) {
2395                 req = rb_entry(p, struct ceph_mds_request, r_node);
2396                 p = rb_next(p);
2397                 if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags))
2398                         continue;
2399                 if (req->r_attempts > 0)
2400                         continue; /* only new requests */
2401                 if (req->r_session &&
2402                     req->r_session->s_mds == mds) {
2403                         dout(" kicking tid %llu\n", req->r_tid);
2404                         list_del_init(&req->r_wait);
2405                         __do_request(mdsc, req);
2406                 }
2407         }
2408 }
2409
2410 void ceph_mdsc_submit_request(struct ceph_mds_client *mdsc,
2411                               struct ceph_mds_request *req)
2412 {
2413         dout("submit_request on %p\n", req);
2414         mutex_lock(&mdsc->mutex);
2415         __register_request(mdsc, req, NULL);
2416         __do_request(mdsc, req);
2417         mutex_unlock(&mdsc->mutex);
2418 }
2419
2420 /*
2421  * Synchrously perform an mds request.  Take care of all of the
2422  * session setup, forwarding, retry details.
2423  */
2424 int ceph_mdsc_do_request(struct ceph_mds_client *mdsc,
2425                          struct inode *dir,
2426                          struct ceph_mds_request *req)
2427 {
2428         int err;
2429
2430         dout("do_request on %p\n", req);
2431
2432         /* take CAP_PIN refs for r_inode, r_parent, r_old_dentry */
2433         if (req->r_inode)
2434                 ceph_get_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
2435         if (req->r_parent)
2436                 ceph_get_cap_refs(ceph_inode(req->r_parent), CEPH_CAP_PIN);
2437         if (req->r_old_dentry_dir)
2438                 ceph_get_cap_refs(ceph_inode(req->r_old_dentry_dir),
2439                                   CEPH_CAP_PIN);
2440
2441         /* issue */
2442         mutex_lock(&mdsc->mutex);
2443         __register_request(mdsc, req, dir);
2444         __do_request(mdsc, req);
2445
2446         if (req->r_err) {
2447                 err = req->r_err;
2448                 goto out;
2449         }
2450
2451         /* wait */
2452         mutex_unlock(&mdsc->mutex);
2453         dout("do_request waiting\n");
2454         if (!req->r_timeout && req->r_wait_for_completion) {
2455                 err = req->r_wait_for_completion(mdsc, req);
2456         } else {
2457                 long timeleft = wait_for_completion_killable_timeout(
2458                                         &req->r_completion,
2459                                         ceph_timeout_jiffies(req->r_timeout));
2460                 if (timeleft > 0)
2461                         err = 0;
2462                 else if (!timeleft)
2463                         err = -EIO;  /* timed out */
2464                 else
2465                         err = timeleft;  /* killed */
2466         }
2467         dout("do_request waited, got %d\n", err);
2468         mutex_lock(&mdsc->mutex);
2469
2470         /* only abort if we didn't race with a real reply */
2471         if (test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags)) {
2472                 err = le32_to_cpu(req->r_reply_info.head->result);
2473         } else if (err < 0) {
2474                 dout("aborted request %lld with %d\n", req->r_tid, err);
2475
2476                 /*
2477                  * ensure we aren't running concurrently with
2478                  * ceph_fill_trace or ceph_readdir_prepopulate, which
2479                  * rely on locks (dir mutex) held by our caller.
2480                  */
2481                 mutex_lock(&req->r_fill_mutex);
2482                 req->r_err = err;
2483                 set_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags);
2484                 mutex_unlock(&req->r_fill_mutex);
2485
2486                 if (req->r_parent &&
2487                     (req->r_op & CEPH_MDS_OP_WRITE))
2488                         ceph_invalidate_dir_request(req);
2489         } else {
2490                 err = req->r_err;
2491         }
2492
2493 out:
2494         mutex_unlock(&mdsc->mutex);
2495         dout("do_request %p done, result %d\n", req, err);
2496         return err;
2497 }
2498
2499 /*
2500  * Invalidate dir's completeness, dentry lease state on an aborted MDS
2501  * namespace request.
2502  */
2503 void ceph_invalidate_dir_request(struct ceph_mds_request *req)
2504 {
2505         struct inode *dir = req->r_parent;
2506         struct inode *old_dir = req->r_old_dentry_dir;
2507
2508         dout("invalidate_dir_request %p %p (complete, lease(s))\n", dir, old_dir);
2509
2510         ceph_dir_clear_complete(dir);
2511         if (old_dir)
2512                 ceph_dir_clear_complete(old_dir);
2513         if (req->r_dentry)
2514                 ceph_invalidate_dentry_lease(req->r_dentry);
2515         if (req->r_old_dentry)
2516                 ceph_invalidate_dentry_lease(req->r_old_dentry);
2517 }
2518
2519 /*
2520  * Handle mds reply.
2521  *
2522  * We take the session mutex and parse and process the reply immediately.
2523  * This preserves the logical ordering of replies, capabilities, etc., sent
2524  * by the MDS as they are applied to our local cache.
2525  */
2526 static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
2527 {
2528         struct ceph_mds_client *mdsc = session->s_mdsc;
2529         struct ceph_mds_request *req;
2530         struct ceph_mds_reply_head *head = msg->front.iov_base;
2531         struct ceph_mds_reply_info_parsed *rinfo;  /* parsed reply info */
2532         struct ceph_snap_realm *realm;
2533         u64 tid;
2534         int err, result;
2535         int mds = session->s_mds;
2536
2537         if (msg->front.iov_len < sizeof(*head)) {
2538                 pr_err("mdsc_handle_reply got corrupt (short) reply\n");
2539                 ceph_msg_dump(msg);
2540                 return;
2541         }
2542
2543         /* get request, session */
2544         tid = le64_to_cpu(msg->hdr.tid);
2545         mutex_lock(&mdsc->mutex);
2546         req = lookup_get_request(mdsc, tid);
2547         if (!req) {
2548                 dout("handle_reply on unknown tid %llu\n", tid);
2549                 mutex_unlock(&mdsc->mutex);
2550                 return;
2551         }
2552         dout("handle_reply %p\n", req);
2553
2554         /* correct session? */
2555         if (req->r_session != session) {
2556                 pr_err("mdsc_handle_reply got %llu on session mds%d"
2557                        " not mds%d\n", tid, session->s_mds,
2558                        req->r_session ? req->r_session->s_mds : -1);
2559                 mutex_unlock(&mdsc->mutex);
2560                 goto out;
2561         }
2562
2563         /* dup? */
2564         if ((test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags) && !head->safe) ||
2565             (test_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags) && head->safe)) {
2566                 pr_warn("got a dup %s reply on %llu from mds%d\n",
2567                            head->safe ? "safe" : "unsafe", tid, mds);
2568                 mutex_unlock(&mdsc->mutex);
2569                 goto out;
2570         }
2571         if (test_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags)) {
2572                 pr_warn("got unsafe after safe on %llu from mds%d\n",
2573                            tid, mds);
2574                 mutex_unlock(&mdsc->mutex);
2575                 goto out;
2576         }
2577
2578         result = le32_to_cpu(head->result);
2579
2580         /*
2581          * Handle an ESTALE
2582          * if we're not talking to the authority, send to them
2583          * if the authority has changed while we weren't looking,
2584          * send to new authority
2585          * Otherwise we just have to return an ESTALE
2586          */
2587         if (result == -ESTALE) {
2588                 dout("got ESTALE on request %llu\n", req->r_tid);
2589                 req->r_resend_mds = -1;
2590                 if (req->r_direct_mode != USE_AUTH_MDS) {
2591                         dout("not using auth, setting for that now\n");
2592                         req->r_direct_mode = USE_AUTH_MDS;
2593                         __do_request(mdsc, req);
2594                         mutex_unlock(&mdsc->mutex);
2595                         goto out;
2596                 } else  {
2597                         int mds = __choose_mds(mdsc, req);
2598                         if (mds >= 0 && mds != req->r_session->s_mds) {
2599                                 dout("but auth changed, so resending\n");
2600                                 __do_request(mdsc, req);
2601                                 mutex_unlock(&mdsc->mutex);
2602                                 goto out;
2603                         }
2604                 }
2605                 dout("have to return ESTALE on request %llu\n", req->r_tid);
2606         }
2607
2608
2609         if (head->safe) {
2610                 set_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags);
2611                 __unregister_request(mdsc, req);
2612
2613                 if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
2614                         /*
2615                          * We already handled the unsafe response, now do the
2616                          * cleanup.  No need to examine the response; the MDS
2617                          * doesn't include any result info in the safe
2618                          * response.  And even if it did, there is nothing
2619                          * useful we could do with a revised return value.
2620                          */
2621                         dout("got safe reply %llu, mds%d\n", tid, mds);
2622
2623                         /* last unsafe request during umount? */
2624                         if (mdsc->stopping && !__get_oldest_req(mdsc))
2625                                 complete_all(&mdsc->safe_umount_waiters);
2626                         mutex_unlock(&mdsc->mutex);
2627                         goto out;
2628                 }
2629         } else {
2630                 set_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags);
2631                 list_add_tail(&req->r_unsafe_item, &req->r_session->s_unsafe);
2632                 if (req->r_unsafe_dir) {
2633                         struct ceph_inode_info *ci =
2634                                         ceph_inode(req->r_unsafe_dir);
2635                         spin_lock(&ci->i_unsafe_lock);
2636                         list_add_tail(&req->r_unsafe_dir_item,
2637                                       &ci->i_unsafe_dirops);
2638                         spin_unlock(&ci->i_unsafe_lock);
2639                 }
2640         }
2641
2642         dout("handle_reply tid %lld result %d\n", tid, result);
2643         rinfo = &req->r_reply_info;
2644         err = parse_reply_info(msg, rinfo, session->s_con.peer_features);
2645         mutex_unlock(&mdsc->mutex);
2646
2647         mutex_lock(&session->s_mutex);
2648         if (err < 0) {
2649                 pr_err("mdsc_handle_reply got corrupt reply mds%d(tid:%lld)\n", mds, tid);
2650                 ceph_msg_dump(msg);
2651                 goto out_err;
2652         }
2653
2654         /* snap trace */
2655         realm = NULL;
2656         if (rinfo->snapblob_len) {
2657                 down_write(&mdsc->snap_rwsem);
2658                 ceph_update_snap_trace(mdsc, rinfo->snapblob,
2659                                 rinfo->snapblob + rinfo->snapblob_len,
2660                                 le32_to_cpu(head->op) == CEPH_MDS_OP_RMSNAP,
2661                                 &realm);
2662                 downgrade_write(&mdsc->snap_rwsem);
2663         } else {
2664                 down_read(&mdsc->snap_rwsem);
2665         }
2666
2667         /* insert trace into our cache */
2668         mutex_lock(&req->r_fill_mutex);
2669         current->journal_info = req;
2670         err = ceph_fill_trace(mdsc->fsc->sb, req);
2671         if (err == 0) {
2672                 if (result == 0 && (req->r_op == CEPH_MDS_OP_READDIR ||
2673                                     req->r_op == CEPH_MDS_OP_LSSNAP))
2674                         ceph_readdir_prepopulate(req, req->r_session);
2675                 ceph_unreserve_caps(mdsc, &req->r_caps_reservation);
2676         }
2677         current->journal_info = NULL;
2678         mutex_unlock(&req->r_fill_mutex);
2679
2680         up_read(&mdsc->snap_rwsem);
2681         if (realm)
2682                 ceph_put_snap_realm(mdsc, realm);
2683
2684         if (err == 0 && req->r_target_inode &&
2685             test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
2686                 struct ceph_inode_info *ci = ceph_inode(req->r_target_inode);
2687                 spin_lock(&ci->i_unsafe_lock);
2688                 list_add_tail(&req->r_unsafe_target_item, &ci->i_unsafe_iops);
2689                 spin_unlock(&ci->i_unsafe_lock);
2690         }
2691 out_err:
2692         mutex_lock(&mdsc->mutex);
2693         if (!test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
2694                 if (err) {
2695                         req->r_err = err;
2696                 } else {
2697                         req->r_reply =  ceph_msg_get(msg);
2698                         set_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags);
2699                 }
2700         } else {
2701                 dout("reply arrived after request %lld was aborted\n", tid);
2702         }
2703         mutex_unlock(&mdsc->mutex);
2704
2705         mutex_unlock(&session->s_mutex);
2706
2707         /* kick calling process */
2708         complete_request(mdsc, req);
2709 out:
2710         ceph_mdsc_put_request(req);
2711         return;
2712 }
2713
2714
2715
2716 /*
2717  * handle mds notification that our request has been forwarded.
2718  */
2719 static void handle_forward(struct ceph_mds_client *mdsc,
2720                            struct ceph_mds_session *session,
2721                            struct ceph_msg *msg)
2722 {
2723         struct ceph_mds_request *req;
2724         u64 tid = le64_to_cpu(msg->hdr.tid);
2725         u32 next_mds;
2726         u32 fwd_seq;
2727         int err = -EINVAL;
2728         void *p = msg->front.iov_base;
2729         void *end = p + msg->front.iov_len;
2730
2731         ceph_decode_need(&p, end, 2*sizeof(u32), bad);
2732         next_mds = ceph_decode_32(&p);
2733         fwd_seq = ceph_decode_32(&p);
2734
2735         mutex_lock(&mdsc->mutex);
2736         req = lookup_get_request(mdsc, tid);
2737         if (!req) {
2738                 dout("forward tid %llu to mds%d - req dne\n", tid, next_mds);
2739                 goto out;  /* dup reply? */
2740         }
2741
2742         if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
2743                 dout("forward tid %llu aborted, unregistering\n", tid);
2744                 __unregister_request(mdsc, req);
2745         } else if (fwd_seq <= req->r_num_fwd) {
2746                 dout("forward tid %llu to mds%d - old seq %d <= %d\n",
2747                      tid, next_mds, req->r_num_fwd, fwd_seq);
2748         } else {
2749                 /* resend. forward race not possible; mds would drop */
2750                 dout("forward tid %llu to mds%d (we resend)\n", tid, next_mds);
2751                 BUG_ON(req->r_err);
2752                 BUG_ON(test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags));
2753                 req->r_attempts = 0;
2754                 req->r_num_fwd = fwd_seq;
2755                 req->r_resend_mds = next_mds;
2756                 put_request_session(req);
2757                 __do_request(mdsc, req);
2758         }
2759         ceph_mdsc_put_request(req);
2760 out:
2761         mutex_unlock(&mdsc->mutex);
2762         return;
2763
2764 bad:
2765         pr_err("mdsc_handle_forward decode error err=%d\n", err);
2766 }
2767
2768 /*
2769  * handle a mds session control message
2770  */
2771 static void handle_session(struct ceph_mds_session *session,
2772                            struct ceph_msg *msg)
2773 {
2774         struct ceph_mds_client *mdsc = session->s_mdsc;
2775         u32 op;
2776         u64 seq;
2777         int mds = session->s_mds;
2778         struct ceph_mds_session_head *h = msg->front.iov_base;
2779         int wake = 0;
2780
2781         /* decode */
2782         if (msg->front.iov_len < sizeof(*h))
2783                 goto bad;
2784         op = le32_to_cpu(h->op);
2785         seq = le64_to_cpu(h->seq);
2786
2787         mutex_lock(&mdsc->mutex);
2788         if (op == CEPH_SESSION_CLOSE) {
2789                 get_session(session);
2790                 __unregister_session(mdsc, session);
2791         }
2792         /* FIXME: this ttl calculation is generous */
2793         session->s_ttl = jiffies + HZ*mdsc->mdsmap->m_session_autoclose;
2794         mutex_unlock(&mdsc->mutex);
2795
2796         mutex_lock(&session->s_mutex);
2797
2798         dout("handle_session mds%d %s %p state %s seq %llu\n",
2799              mds, ceph_session_op_name(op), session,
2800              ceph_session_state_name(session->s_state), seq);
2801
2802         if (session->s_state == CEPH_MDS_SESSION_HUNG) {
2803                 session->s_state = CEPH_MDS_SESSION_OPEN;
2804                 pr_info("mds%d came back\n", session->s_mds);
2805         }
2806
2807         switch (op) {
2808         case CEPH_SESSION_OPEN:
2809                 if (session->s_state == CEPH_MDS_SESSION_RECONNECTING)
2810                         pr_info("mds%d reconnect success\n", session->s_mds);
2811                 session->s_state = CEPH_MDS_SESSION_OPEN;
2812                 renewed_caps(mdsc, session, 0);
2813                 wake = 1;
2814                 if (mdsc->stopping)
2815                         __close_session(mdsc, session);
2816                 break;
2817
2818         case CEPH_SESSION_RENEWCAPS:
2819                 if (session->s_renew_seq == seq)
2820                         renewed_caps(mdsc, session, 1);
2821                 break;
2822
2823         case CEPH_SESSION_CLOSE:
2824                 if (session->s_state == CEPH_MDS_SESSION_RECONNECTING)
2825                         pr_info("mds%d reconnect denied\n", session->s_mds);
2826                 cleanup_session_requests(mdsc, session);
2827                 remove_session_caps(session);
2828                 wake = 2; /* for good measure */
2829                 wake_up_all(&mdsc->session_close_wq);
2830                 break;
2831
2832         case CEPH_SESSION_STALE:
2833                 pr_info("mds%d caps went stale, renewing\n",
2834                         session->s_mds);
2835                 spin_lock(&session->s_gen_ttl_lock);
2836                 session->s_cap_gen++;
2837                 session->s_cap_ttl = jiffies - 1;
2838                 spin_unlock(&session->s_gen_ttl_lock);
2839                 send_renew_caps(mdsc, session);
2840                 break;
2841
2842         case CEPH_SESSION_RECALL_STATE:
2843                 ceph_trim_caps(mdsc, session, le32_to_cpu(h->max_caps));
2844                 break;
2845
2846         case CEPH_SESSION_FLUSHMSG:
2847                 send_flushmsg_ack(mdsc, session, seq);
2848                 break;
2849
2850         case CEPH_SESSION_FORCE_RO:
2851                 dout("force_session_readonly %p\n", session);
2852                 spin_lock(&session->s_cap_lock);
2853                 session->s_readonly = true;
2854                 spin_unlock(&session->s_cap_lock);
2855                 wake_up_session_caps(session, 0);
2856                 break;
2857
2858         case CEPH_SESSION_REJECT:
2859                 WARN_ON(session->s_state != CEPH_MDS_SESSION_OPENING);
2860                 pr_info("mds%d rejected session\n", session->s_mds);
2861                 session->s_state = CEPH_MDS_SESSION_REJECTED;
2862                 cleanup_session_requests(mdsc, session);
2863                 remove_session_caps(session);
2864                 wake = 2; /* for good measure */
2865                 break;
2866
2867         default:
2868                 pr_err("mdsc_handle_session bad op %d mds%d\n", op, mds);
2869                 WARN_ON(1);
2870         }
2871
2872         mutex_unlock(&session->s_mutex);
2873         if (wake) {
2874                 mutex_lock(&mdsc->mutex);
2875                 __wake_requests(mdsc, &session->s_waiting);
2876                 if (wake == 2)
2877                         kick_requests(mdsc, mds);
2878                 mutex_unlock(&mdsc->mutex);
2879         }
2880         if (op == CEPH_SESSION_CLOSE)
2881                 ceph_put_mds_session(session);
2882         return;
2883
2884 bad:
2885         pr_err("mdsc_handle_session corrupt message mds%d len %d\n", mds,
2886                (int)msg->front.iov_len);
2887         ceph_msg_dump(msg);
2888         return;
2889 }
2890
2891
2892 /*
2893  * called under session->mutex.
2894  */
2895 static void replay_unsafe_requests(struct ceph_mds_client *mdsc,
2896                                    struct ceph_mds_session *session)
2897 {
2898         struct ceph_mds_request *req, *nreq;
2899         struct rb_node *p;
2900         int err;
2901
2902         dout("replay_unsafe_requests mds%d\n", session->s_mds);
2903
2904         mutex_lock(&mdsc->mutex);
2905         list_for_each_entry_safe(req, nreq, &session->s_unsafe, r_unsafe_item) {
2906                 err = __prepare_send_request(mdsc, req, session->s_mds, true);
2907                 if (!err) {
2908                         ceph_msg_get(req->r_request);
2909                         ceph_con_send(&session->s_con, req->r_request);
2910                 }
2911         }
2912
2913         /*
2914          * also re-send old requests when MDS enters reconnect stage. So that MDS
2915          * can process completed request in clientreplay stage.
2916          */
2917         p = rb_first(&mdsc->request_tree);
2918         while (p) {
2919                 req = rb_entry(p, struct ceph_mds_request, r_node);
2920                 p = rb_next(p);
2921                 if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags))
2922                         continue;
2923                 if (req->r_attempts == 0)
2924                         continue; /* only old requests */
2925                 if (req->r_session &&
2926                     req->r_session->s_mds == session->s_mds) {
2927                         err = __prepare_send_request(mdsc, req,
2928                                                      session->s_mds, true);
2929                         if (!err) {
2930                                 ceph_msg_get(req->r_request);
2931                                 ceph_con_send(&session->s_con, req->r_request);
2932                         }
2933                 }
2934         }
2935         mutex_unlock(&mdsc->mutex);
2936 }
2937
2938 /*
2939  * Encode information about a cap for a reconnect with the MDS.
2940  */
2941 static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
2942                           void *arg)
2943 {
2944         union {
2945                 struct ceph_mds_cap_reconnect v2;
2946                 struct ceph_mds_cap_reconnect_v1 v1;
2947         } rec;
2948         struct ceph_inode_info *ci = cap->ci;
2949         struct ceph_reconnect_state *recon_state = arg;
2950         struct ceph_pagelist *pagelist = recon_state->pagelist;
2951         char *path;
2952         int pathlen, err;
2953         u64 pathbase;
2954         u64 snap_follows;
2955         struct dentry *dentry;
2956
2957         dout(" adding %p ino %llx.%llx cap %p %lld %s\n",
2958              inode, ceph_vinop(inode), cap, cap->cap_id,
2959              ceph_cap_string(cap->issued));
2960         err = ceph_pagelist_encode_64(pagelist, ceph_ino(inode));
2961         if (err)
2962                 return err;
2963
2964         dentry = d_find_alias(inode);
2965         if (dentry) {
2966                 path = ceph_mdsc_build_path(dentry, &pathlen, &pathbase, 0);
2967                 if (IS_ERR(path)) {
2968                         err = PTR_ERR(path);
2969                         goto out_dput;
2970                 }
2971         } else {
2972                 path = NULL;
2973                 pathlen = 0;
2974                 pathbase = 0;
2975         }
2976
2977         spin_lock(&ci->i_ceph_lock);
2978         cap->seq = 0;        /* reset cap seq */
2979         cap->issue_seq = 0;  /* and issue_seq */
2980         cap->mseq = 0;       /* and migrate_seq */
2981         cap->cap_gen = cap->session->s_cap_gen;
2982
2983         if (recon_state->msg_version >= 2) {
2984                 rec.v2.cap_id = cpu_to_le64(cap->cap_id);
2985                 rec.v2.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
2986                 rec.v2.issued = cpu_to_le32(cap->issued);
2987                 rec.v2.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
2988                 rec.v2.pathbase = cpu_to_le64(pathbase);
2989                 rec.v2.flock_len = (__force __le32)
2990                         ((ci->i_ceph_flags & CEPH_I_ERROR_FILELOCK) ? 0 : 1);
2991         } else {
2992                 rec.v1.cap_id = cpu_to_le64(cap->cap_id);
2993                 rec.v1.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
2994                 rec.v1.issued = cpu_to_le32(cap->issued);
2995                 rec.v1.size = cpu_to_le64(inode->i_size);
2996                 ceph_encode_timespec64(&rec.v1.mtime, &inode->i_mtime);
2997                 ceph_encode_timespec64(&rec.v1.atime, &inode->i_atime);
2998                 rec.v1.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
2999                 rec.v1.pathbase = cpu_to_le64(pathbase);
3000         }
3001
3002         if (list_empty(&ci->i_cap_snaps)) {
3003                 snap_follows = ci->i_head_snapc ? ci->i_head_snapc->seq : 0;
3004         } else {
3005                 struct ceph_cap_snap *capsnap =
3006                         list_first_entry(&ci->i_cap_snaps,
3007                                          struct ceph_cap_snap, ci_item);
3008                 snap_follows = capsnap->follows;
3009         }
3010         spin_unlock(&ci->i_ceph_lock);
3011
3012         if (recon_state->msg_version >= 2) {
3013                 int num_fcntl_locks, num_flock_locks;
3014                 struct ceph_filelock *flocks = NULL;
3015                 size_t struct_len, total_len = 0;
3016                 u8 struct_v = 0;
3017
3018 encode_again:
3019                 if (rec.v2.flock_len) {
3020                         ceph_count_locks(inode, &num_fcntl_locks, &num_flock_locks);
3021                 } else {
3022                         num_fcntl_locks = 0;
3023                         num_flock_locks = 0;
3024                 }
3025                 if (num_fcntl_locks + num_flock_locks > 0) {
3026                         flocks = kmalloc_array(num_fcntl_locks + num_flock_locks,
3027                                                sizeof(struct ceph_filelock),
3028                                                GFP_NOFS);
3029                         if (!flocks) {
3030                                 err = -ENOMEM;
3031                                 goto out_free;
3032                         }
3033                         err = ceph_encode_locks_to_buffer(inode, flocks,
3034                                                           num_fcntl_locks,
3035                                                           num_flock_locks);
3036                         if (err) {
3037                                 kfree(flocks);
3038                                 flocks = NULL;
3039                                 if (err == -ENOSPC)
3040                                         goto encode_again;
3041                                 goto out_free;
3042                         }
3043                 } else {
3044                         kfree(flocks);
3045                         flocks = NULL;
3046                 }
3047
3048                 if (recon_state->msg_version >= 3) {
3049                         /* version, compat_version and struct_len */
3050                         total_len = 2 * sizeof(u8) + sizeof(u32);
3051                         struct_v = 2;
3052                 }
3053                 /*
3054                  * number of encoded locks is stable, so copy to pagelist
3055                  */
3056                 struct_len = 2 * sizeof(u32) +
3057                             (num_fcntl_locks + num_flock_locks) *
3058                             sizeof(struct ceph_filelock);
3059                 rec.v2.flock_len = cpu_to_le32(struct_len);
3060
3061                 struct_len += sizeof(rec.v2);
3062                 struct_len += sizeof(u32) + pathlen;
3063
3064                 if (struct_v >= 2)
3065                         struct_len += sizeof(u64); /* snap_follows */
3066
3067                 total_len += struct_len;
3068                 err = ceph_pagelist_reserve(pagelist, total_len);
3069
3070                 if (!err) {
3071                         if (recon_state->msg_version >= 3) {
3072                                 ceph_pagelist_encode_8(pagelist, struct_v);
3073                                 ceph_pagelist_encode_8(pagelist, 1);
3074                                 ceph_pagelist_encode_32(pagelist, struct_len);
3075                         }
3076                         ceph_pagelist_encode_string(pagelist, path, pathlen);
3077                         ceph_pagelist_append(pagelist, &rec, sizeof(rec.v2));
3078                         ceph_locks_to_pagelist(flocks, pagelist,
3079                                                num_fcntl_locks,
3080                                                num_flock_locks);
3081                         if (struct_v >= 2)
3082                                 ceph_pagelist_encode_64(pagelist, snap_follows);
3083                 }
3084                 kfree(flocks);
3085         } else {
3086                 size_t size = sizeof(u32) + pathlen + sizeof(rec.v1);
3087                 err = ceph_pagelist_reserve(pagelist, size);
3088                 if (!err) {
3089                         ceph_pagelist_encode_string(pagelist, path, pathlen);
3090                         ceph_pagelist_append(pagelist, &rec, sizeof(rec.v1));
3091                 }
3092         }
3093
3094         recon_state->nr_caps++;
3095 out_free:
3096         kfree(path);
3097 out_dput:
3098         dput(dentry);
3099         return err;
3100 }
3101
3102
3103 /*
3104  * If an MDS fails and recovers, clients need to reconnect in order to
3105  * reestablish shared state.  This includes all caps issued through
3106  * this session _and_ the snap_realm hierarchy.  Because it's not
3107  * clear which snap realms the mds cares about, we send everything we
3108  * know about.. that ensures we'll then get any new info the
3109  * recovering MDS might have.
3110  *
3111  * This is a relatively heavyweight operation, but it's rare.
3112  *
3113  * called with mdsc->mutex held.
3114  */
3115 static void send_mds_reconnect(struct ceph_mds_client *mdsc,
3116                                struct ceph_mds_session *session)
3117 {
3118         struct ceph_msg *reply;
3119         struct rb_node *p;
3120         int mds = session->s_mds;
3121         int err = -ENOMEM;
3122         int s_nr_caps;
3123         struct ceph_pagelist *pagelist;
3124         struct ceph_reconnect_state recon_state;
3125         LIST_HEAD(dispose);
3126
3127         pr_info("mds%d reconnect start\n", mds);
3128
3129         pagelist = kmalloc(sizeof(*pagelist), GFP_NOFS);
3130         if (!pagelist)
3131                 goto fail_nopagelist;
3132         ceph_pagelist_init(pagelist);
3133
3134         reply = ceph_msg_new(CEPH_MSG_CLIENT_RECONNECT, 0, GFP_NOFS, false);
3135         if (!reply)
3136                 goto fail_nomsg;
3137
3138         mutex_lock(&session->s_mutex);
3139         session->s_state = CEPH_MDS_SESSION_RECONNECTING;
3140         session->s_seq = 0;
3141
3142         dout("session %p state %s\n", session,
3143              ceph_session_state_name(session->s_state));
3144
3145         spin_lock(&session->s_gen_ttl_lock);
3146         session->s_cap_gen++;
3147         spin_unlock(&session->s_gen_ttl_lock);
3148
3149         spin_lock(&session->s_cap_lock);
3150         /* don't know if session is readonly */
3151         session->s_readonly = 0;
3152         /*
3153          * notify __ceph_remove_cap() that we are composing cap reconnect.
3154          * If a cap get released before being added to the cap reconnect,
3155          * __ceph_remove_cap() should skip queuing cap release.
3156          */
3157         session->s_cap_reconnect = 1;
3158         /* drop old cap expires; we're about to reestablish that state */
3159         detach_cap_releases(session, &dispose);
3160         spin_unlock(&session->s_cap_lock);
3161         dispose_cap_releases(mdsc, &dispose);
3162
3163         /* trim unused caps to reduce MDS's cache rejoin time */
3164         if (mdsc->fsc->sb->s_root)
3165                 shrink_dcache_parent(mdsc->fsc->sb->s_root);
3166
3167         ceph_con_close(&session->s_con);
3168         ceph_con_open(&session->s_con,
3169                       CEPH_ENTITY_TYPE_MDS, mds,
3170                       ceph_mdsmap_get_addr(mdsc->mdsmap, mds));
3171
3172         /* replay unsafe requests */
3173         replay_unsafe_requests(mdsc, session);
3174
3175         down_read(&mdsc->snap_rwsem);
3176
3177         /* traverse this session's caps */
3178         s_nr_caps = session->s_nr_caps;
3179         err = ceph_pagelist_encode_32(pagelist, s_nr_caps);
3180         if (err)
3181                 goto fail;
3182
3183         recon_state.nr_caps = 0;
3184         recon_state.pagelist = pagelist;
3185         if (session->s_con.peer_features & CEPH_FEATURE_MDSENC)
3186                 recon_state.msg_version = 3;
3187         else if (session->s_con.peer_features & CEPH_FEATURE_FLOCK)
3188                 recon_state.msg_version = 2;
3189         else
3190                 recon_state.msg_version = 1;
3191         err = iterate_session_caps(session, encode_caps_cb, &recon_state);
3192         if (err < 0)
3193                 goto fail;
3194
3195         spin_lock(&session->s_cap_lock);
3196         session->s_cap_reconnect = 0;
3197         spin_unlock(&session->s_cap_lock);
3198
3199         /*
3200          * snaprealms.  we provide mds with the ino, seq (version), and
3201          * parent for all of our realms.  If the mds has any newer info,
3202          * it will tell us.
3203          */
3204         for (p = rb_first(&mdsc->snap_realms); p; p = rb_next(p)) {
3205                 struct ceph_snap_realm *realm =
3206                         rb_entry(p, struct ceph_snap_realm, node);
3207                 struct ceph_mds_snaprealm_reconnect sr_rec;
3208
3209                 dout(" adding snap realm %llx seq %lld parent %llx\n",
3210                      realm->ino, realm->seq, realm->parent_ino);
3211                 sr_rec.ino = cpu_to_le64(realm->ino);
3212                 sr_rec.seq = cpu_to_le64(realm->seq);
3213                 sr_rec.parent = cpu_to_le64(realm->parent_ino);
3214                 err = ceph_pagelist_append(pagelist, &sr_rec, sizeof(sr_rec));
3215                 if (err)
3216                         goto fail;
3217         }
3218
3219         reply->hdr.version = cpu_to_le16(recon_state.msg_version);
3220
3221         /* raced with cap release? */
3222         if (s_nr_caps != recon_state.nr_caps) {
3223                 struct page *page = list_first_entry(&pagelist->head,
3224                                                      struct page, lru);
3225                 __le32 *addr = kmap_atomic(page);
3226                 *addr = cpu_to_le32(recon_state.nr_caps);
3227                 kunmap_atomic(addr);
3228         }
3229
3230         reply->hdr.data_len = cpu_to_le32(pagelist->length);
3231         ceph_msg_data_add_pagelist(reply, pagelist);
3232
3233         ceph_early_kick_flushing_caps(mdsc, session);
3234
3235         ceph_con_send(&session->s_con, reply);
3236
3237         mutex_unlock(&session->s_mutex);
3238
3239         mutex_lock(&mdsc->mutex);
3240         __wake_requests(mdsc, &session->s_waiting);
3241         mutex_unlock(&mdsc->mutex);
3242
3243         up_read(&mdsc->snap_rwsem);
3244         return;
3245
3246 fail:
3247         ceph_msg_put(reply);
3248         up_read(&mdsc->snap_rwsem);
3249         mutex_unlock(&session->s_mutex);
3250 fail_nomsg:
3251         ceph_pagelist_release(pagelist);
3252 fail_nopagelist:
3253         pr_err("error %d preparing reconnect for mds%d\n", err, mds);
3254         return;
3255 }
3256
3257
3258 /*
3259  * compare old and new mdsmaps, kicking requests
3260  * and closing out old connections as necessary
3261  *
3262  * called under mdsc->mutex.
3263  */
3264 static void check_new_map(struct ceph_mds_client *mdsc,
3265                           struct ceph_mdsmap *newmap,
3266                           struct ceph_mdsmap *oldmap)
3267 {
3268         int i;
3269         int oldstate, newstate;
3270         struct ceph_mds_session *s;
3271
3272         dout("check_new_map new %u old %u\n",
3273              newmap->m_epoch, oldmap->m_epoch);
3274
3275         for (i = 0; i < oldmap->m_num_mds && i < mdsc->max_sessions; i++) {
3276                 if (!mdsc->sessions[i])
3277                         continue;
3278                 s = mdsc->sessions[i];
3279                 oldstate = ceph_mdsmap_get_state(oldmap, i);
3280                 newstate = ceph_mdsmap_get_state(newmap, i);
3281
3282                 dout("check_new_map mds%d state %s%s -> %s%s (session %s)\n",
3283                      i, ceph_mds_state_name(oldstate),
3284                      ceph_mdsmap_is_laggy(oldmap, i) ? " (laggy)" : "",
3285                      ceph_mds_state_name(newstate),
3286                      ceph_mdsmap_is_laggy(newmap, i) ? " (laggy)" : "",
3287                      ceph_session_state_name(s->s_state));
3288
3289                 if (i >= newmap->m_num_mds ||
3290                     memcmp(ceph_mdsmap_get_addr(oldmap, i),
3291                            ceph_mdsmap_get_addr(newmap, i),
3292                            sizeof(struct ceph_entity_addr))) {
3293                         if (s->s_state == CEPH_MDS_SESSION_OPENING) {
3294                                 /* the session never opened, just close it
3295                                  * out now */
3296                                 get_session(s);
3297                                 __unregister_session(mdsc, s);
3298                                 __wake_requests(mdsc, &s->s_waiting);
3299                                 ceph_put_mds_session(s);
3300                         } else if (i >= newmap->m_num_mds) {
3301                                 /* force close session for stopped mds */
3302                                 get_session(s);
3303                                 __unregister_session(mdsc, s);
3304                                 __wake_requests(mdsc, &s->s_waiting);
3305                                 kick_requests(mdsc, i);
3306                                 mutex_unlock(&mdsc->mutex);
3307
3308                                 mutex_lock(&s->s_mutex);
3309                                 cleanup_session_requests(mdsc, s);
3310                                 remove_session_caps(s);
3311                                 mutex_unlock(&s->s_mutex);
3312
3313                                 ceph_put_mds_session(s);
3314
3315                                 mutex_lock(&mdsc->mutex);
3316                         } else {
3317                                 /* just close it */
3318                                 mutex_unlock(&mdsc->mutex);
3319                                 mutex_lock(&s->s_mutex);
3320                                 mutex_lock(&mdsc->mutex);
3321                                 ceph_con_close(&s->s_con);
3322                                 mutex_unlock(&s->s_mutex);
3323                                 s->s_state = CEPH_MDS_SESSION_RESTARTING;
3324                         }
3325                 } else if (oldstate == newstate) {
3326                         continue;  /* nothing new with this mds */
3327                 }
3328
3329                 /*
3330                  * send reconnect?
3331                  */
3332                 if (s->s_state == CEPH_MDS_SESSION_RESTARTING &&
3333                     newstate >= CEPH_MDS_STATE_RECONNECT) {
3334                         mutex_unlock(&mdsc->mutex);
3335                         send_mds_reconnect(mdsc, s);
3336                         mutex_lock(&mdsc->mutex);
3337                 }
3338
3339                 /*
3340                  * kick request on any mds that has gone active.
3341                  */
3342                 if (oldstate < CEPH_MDS_STATE_ACTIVE &&
3343                     newstate >= CEPH_MDS_STATE_ACTIVE) {
3344                         if (oldstate != CEPH_MDS_STATE_CREATING &&
3345                             oldstate != CEPH_MDS_STATE_STARTING)
3346                                 pr_info("mds%d recovery completed\n", s->s_mds);
3347                         kick_requests(mdsc, i);
3348                         ceph_kick_flushing_caps(mdsc, s);
3349                         wake_up_session_caps(s, 1);
3350                 }
3351         }
3352
3353         for (i = 0; i < newmap->m_num_mds && i < mdsc->max_sessions; i++) {
3354                 s = mdsc->sessions[i];
3355                 if (!s)
3356                         continue;
3357                 if (!ceph_mdsmap_is_laggy(newmap, i))
3358                         continue;
3359                 if (s->s_state == CEPH_MDS_SESSION_OPEN ||
3360                     s->s_state == CEPH_MDS_SESSION_HUNG ||
3361                     s->s_state == CEPH_MDS_SESSION_CLOSING) {
3362                         dout(" connecting to export targets of laggy mds%d\n",
3363                              i);
3364                         __open_export_target_sessions(mdsc, s);
3365                 }
3366         }
3367 }
3368
3369
3370
3371 /*
3372  * leases
3373  */
3374
3375 /*
3376  * caller must hold session s_mutex, dentry->d_lock
3377  */
3378 void __ceph_mdsc_drop_dentry_lease(struct dentry *dentry)
3379 {
3380         struct ceph_dentry_info *di = ceph_dentry(dentry);
3381
3382         ceph_put_mds_session(di->lease_session);
3383         di->lease_session = NULL;
3384 }
3385
3386 static void handle_lease(struct ceph_mds_client *mdsc,
3387                          struct ceph_mds_session *session,
3388                          struct ceph_msg *msg)
3389 {
3390         struct super_block *sb = mdsc->fsc->sb;
3391         struct inode *inode;
3392         struct dentry *parent, *dentry;
3393         struct ceph_dentry_info *di;
3394         int mds = session->s_mds;
3395         struct ceph_mds_lease *h = msg->front.iov_base;
3396         u32 seq;
3397         struct ceph_vino vino;
3398         struct qstr dname;
3399         int release = 0;
3400
3401         dout("handle_lease from mds%d\n", mds);
3402
3403         /* decode */
3404         if (msg->front.iov_len < sizeof(*h) + sizeof(u32))
3405                 goto bad;
3406         vino.ino = le64_to_cpu(h->ino);
3407         vino.snap = CEPH_NOSNAP;
3408         seq = le32_to_cpu(h->seq);
3409         dname.len = get_unaligned_le32(h + 1);
3410         if (msg->front.iov_len < sizeof(*h) + sizeof(u32) + dname.len)
3411                 goto bad;
3412         dname.name = (void *)(h + 1) + sizeof(u32);
3413
3414         /* lookup inode */
3415         inode = ceph_find_inode(sb, vino);
3416         dout("handle_lease %s, ino %llx %p %.*s\n",
3417              ceph_lease_op_name(h->action), vino.ino, inode,
3418              dname.len, dname.name);
3419
3420         mutex_lock(&session->s_mutex);
3421         session->s_seq++;
3422
3423         if (!inode) {
3424                 dout("handle_lease no inode %llx\n", vino.ino);
3425                 goto release;
3426         }
3427
3428         /* dentry */
3429         parent = d_find_alias(inode);
3430         if (!parent) {
3431                 dout("no parent dentry on inode %p\n", inode);
3432                 WARN_ON(1);
3433                 goto release;  /* hrm... */
3434         }
3435         dname.hash = full_name_hash(parent, dname.name, dname.len);
3436         dentry = d_lookup(parent, &dname);
3437         dput(parent);
3438         if (!dentry)
3439                 goto release;
3440
3441         spin_lock(&dentry->d_lock);
3442         di = ceph_dentry(dentry);
3443         switch (h->action) {
3444         case CEPH_MDS_LEASE_REVOKE:
3445                 if (di->lease_session == session) {
3446                         if (ceph_seq_cmp(di->lease_seq, seq) > 0)
3447                                 h->seq = cpu_to_le32(di->lease_seq);
3448                         __ceph_mdsc_drop_dentry_lease(dentry);
3449                 }
3450                 release = 1;
3451                 break;
3452
3453         case CEPH_MDS_LEASE_RENEW:
3454                 if (di->lease_session == session &&
3455                     di->lease_gen == session->s_cap_gen &&
3456                     di->lease_renew_from &&
3457                     di->lease_renew_after == 0) {
3458                         unsigned long duration =
3459                                 msecs_to_jiffies(le32_to_cpu(h->duration_ms));
3460
3461                         di->lease_seq = seq;
3462                         di->time = di->lease_renew_from + duration;
3463                         di->lease_renew_after = di->lease_renew_from +
3464                                 (duration >> 1);
3465                         di->lease_renew_from = 0;
3466                 }
3467                 break;
3468         }
3469         spin_unlock(&dentry->d_lock);
3470         dput(dentry);
3471
3472         if (!release)
3473                 goto out;
3474
3475 release:
3476         /* let's just reuse the same message */
3477         h->action = CEPH_MDS_LEASE_REVOKE_ACK;
3478         ceph_msg_get(msg);
3479         ceph_con_send(&session->s_con, msg);
3480
3481 out:
3482         iput(inode);
3483         mutex_unlock(&session->s_mutex);
3484         return;
3485
3486 bad:
3487         pr_err("corrupt lease message\n");
3488         ceph_msg_dump(msg);
3489 }
3490
3491 void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session,
3492                               struct inode *inode,
3493                               struct dentry *dentry, char action,
3494                               u32 seq)
3495 {
3496         struct ceph_msg *msg;
3497         struct ceph_mds_lease *lease;
3498         int len = sizeof(*lease) + sizeof(u32);
3499         int dnamelen = 0;
3500
3501         dout("lease_send_msg inode %p dentry %p %s to mds%d\n",
3502              inode, dentry, ceph_lease_op_name(action), session->s_mds);
3503         dnamelen = dentry->d_name.len;
3504         len += dnamelen;
3505
3506         msg = ceph_msg_new(CEPH_MSG_CLIENT_LEASE, len, GFP_NOFS, false);
3507         if (!msg)
3508                 return;
3509         lease = msg->front.iov_base;
3510         lease->action = action;
3511         lease->ino = cpu_to_le64(ceph_vino(inode).ino);
3512         lease->first = lease->last = cpu_to_le64(ceph_vino(inode).snap);
3513         lease->seq = cpu_to_le32(seq);
3514         put_unaligned_le32(dnamelen, lease + 1);
3515         memcpy((void *)(lease + 1) + 4, dentry->d_name.name, dnamelen);
3516
3517         /*
3518          * if this is a preemptive lease RELEASE, no need to
3519          * flush request stream, since the actual request will
3520          * soon follow.
3521          */
3522         msg->more_to_follow = (action == CEPH_MDS_LEASE_RELEASE);
3523
3524         ceph_con_send(&session->s_con, msg);
3525 }
3526
3527 /*
3528  * lock unlock sessions, to wait ongoing session activities
3529  */
3530 static void lock_unlock_sessions(struct ceph_mds_client *mdsc)
3531 {
3532         int i;
3533
3534         mutex_lock(&mdsc->mutex);
3535         for (i = 0; i < mdsc->max_sessions; i++) {
3536                 struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i);
3537                 if (!s)
3538                         continue;
3539                 mutex_unlock(&mdsc->mutex);
3540                 mutex_lock(&s->s_mutex);
3541                 mutex_unlock(&s->s_mutex);
3542                 ceph_put_mds_session(s);
3543                 mutex_lock(&mdsc->mutex);
3544         }
3545         mutex_unlock(&mdsc->mutex);
3546 }
3547
3548
3549
3550 /*
3551  * delayed work -- periodically trim expired leases, renew caps with mds
3552  */
3553 static void schedule_delayed(struct ceph_mds_client *mdsc)
3554 {
3555         int delay = 5;
3556         unsigned hz = round_jiffies_relative(HZ * delay);
3557         schedule_delayed_work(&mdsc->delayed_work, hz);
3558 }
3559
3560 static void delayed_work(struct work_struct *work)
3561 {
3562         int i;
3563         struct ceph_mds_client *mdsc =
3564                 container_of(work, struct ceph_mds_client, delayed_work.work);
3565         int renew_interval;
3566         int renew_caps;
3567
3568         dout("mdsc delayed_work\n");
3569         ceph_check_delayed_caps(mdsc);
3570
3571         mutex_lock(&mdsc->mutex);
3572         renew_interval = mdsc->mdsmap->m_session_timeout >> 2;
3573         renew_caps = time_after_eq(jiffies, HZ*renew_interval +
3574                                    mdsc->last_renew_caps);
3575         if (renew_caps)
3576                 mdsc->last_renew_caps = jiffies;
3577
3578         for (i = 0; i < mdsc->max_sessions; i++) {
3579                 struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i);
3580                 if (!s)
3581                         continue;
3582                 if (s->s_state == CEPH_MDS_SESSION_CLOSING) {
3583                         dout("resending session close request for mds%d\n",
3584                              s->s_mds);
3585                         request_close_session(mdsc, s);
3586                         ceph_put_mds_session(s);
3587                         continue;
3588                 }
3589                 if (s->s_ttl && time_after(jiffies, s->s_ttl)) {
3590                         if (s->s_state == CEPH_MDS_SESSION_OPEN) {
3591                                 s->s_state = CEPH_MDS_SESSION_HUNG;
3592                                 pr_info("mds%d hung\n", s->s_mds);
3593                         }
3594                 }
3595                 if (s->s_state < CEPH_MDS_SESSION_OPEN) {
3596                         /* this mds is failed or recovering, just wait */
3597                         ceph_put_mds_session(s);
3598                         continue;
3599                 }
3600                 mutex_unlock(&mdsc->mutex);
3601
3602                 mutex_lock(&s->s_mutex);
3603                 if (renew_caps)
3604                         send_renew_caps(mdsc, s);
3605                 else
3606                         ceph_con_keepalive(&s->s_con);
3607                 if (s->s_state == CEPH_MDS_SESSION_OPEN ||
3608                     s->s_state == CEPH_MDS_SESSION_HUNG)
3609                         ceph_send_cap_releases(mdsc, s);
3610                 mutex_unlock(&s->s_mutex);
3611                 ceph_put_mds_session(s);
3612
3613                 mutex_lock(&mdsc->mutex);
3614         }
3615         mutex_unlock(&mdsc->mutex);
3616
3617         schedule_delayed(mdsc);
3618 }
3619
3620 int ceph_mdsc_init(struct ceph_fs_client *fsc)
3621
3622 {
3623         struct ceph_mds_client *mdsc;
3624
3625         mdsc = kzalloc(sizeof(struct ceph_mds_client), GFP_NOFS);
3626         if (!mdsc)
3627                 return -ENOMEM;
3628         mdsc->fsc = fsc;
3629         mutex_init(&mdsc->mutex);
3630         mdsc->mdsmap = kzalloc(sizeof(*mdsc->mdsmap), GFP_NOFS);
3631         if (!mdsc->mdsmap) {
3632                 kfree(mdsc);
3633                 return -ENOMEM;
3634         }
3635
3636         fsc->mdsc = mdsc;
3637         init_completion(&mdsc->safe_umount_waiters);
3638         init_waitqueue_head(&mdsc->session_close_wq);
3639         INIT_LIST_HEAD(&mdsc->waiting_for_map);
3640         mdsc->sessions = NULL;
3641         atomic_set(&mdsc->num_sessions, 0);
3642         mdsc->max_sessions = 0;
3643         mdsc->stopping = 0;
3644         atomic64_set(&mdsc->quotarealms_count, 0);
3645         mdsc->last_snap_seq = 0;
3646         init_rwsem(&mdsc->snap_rwsem);
3647         mdsc->snap_realms = RB_ROOT;
3648         INIT_LIST_HEAD(&mdsc->snap_empty);
3649         spin_lock_init(&mdsc->snap_empty_lock);
3650         mdsc->last_tid = 0;
3651         mdsc->oldest_tid = 0;
3652         mdsc->request_tree = RB_ROOT;
3653         INIT_DELAYED_WORK(&mdsc->delayed_work, delayed_work);
3654         mdsc->last_renew_caps = jiffies;
3655         INIT_LIST_HEAD(&mdsc->cap_delay_list);
3656         spin_lock_init(&mdsc->cap_delay_lock);
3657         INIT_LIST_HEAD(&mdsc->snap_flush_list);
3658         spin_lock_init(&mdsc->snap_flush_lock);
3659         mdsc->last_cap_flush_tid = 1;
3660         INIT_LIST_HEAD(&mdsc->cap_flush_list);
3661         INIT_LIST_HEAD(&mdsc->cap_dirty);
3662         INIT_LIST_HEAD(&mdsc->cap_dirty_migrating);
3663         mdsc->num_cap_flushing = 0;
3664         spin_lock_init(&mdsc->cap_dirty_lock);
3665         init_waitqueue_head(&mdsc->cap_flushing_wq);
3666         spin_lock_init(&mdsc->dentry_lru_lock);
3667         INIT_LIST_HEAD(&mdsc->dentry_lru);
3668
3669         ceph_caps_init(mdsc);
3670         ceph_adjust_min_caps(mdsc, fsc->min_caps);
3671
3672         init_rwsem(&mdsc->pool_perm_rwsem);
3673         mdsc->pool_perm_tree = RB_ROOT;
3674
3675         strscpy(mdsc->nodename, utsname()->nodename,
3676                 sizeof(mdsc->nodename));
3677         return 0;
3678 }
3679
3680 /*
3681  * Wait for safe replies on open mds requests.  If we time out, drop
3682  * all requests from the tree to avoid dangling dentry refs.
3683  */
3684 static void wait_requests(struct ceph_mds_client *mdsc)
3685 {
3686         struct ceph_options *opts = mdsc->fsc->client->options;
3687         struct ceph_mds_request *req;
3688
3689         mutex_lock(&mdsc->mutex);
3690         if (__get_oldest_req(mdsc)) {
3691                 mutex_unlock(&mdsc->mutex);
3692
3693                 dout("wait_requests waiting for requests\n");
3694                 wait_for_completion_timeout(&mdsc->safe_umount_waiters,
3695                                     ceph_timeout_jiffies(opts->mount_timeout));
3696
3697                 /* tear down remaining requests */
3698                 mutex_lock(&mdsc->mutex);
3699                 while ((req = __get_oldest_req(mdsc))) {
3700                         dout("wait_requests timed out on tid %llu\n",
3701                              req->r_tid);
3702                         __unregister_request(mdsc, req);
3703                 }
3704         }
3705         mutex_unlock(&mdsc->mutex);
3706         dout("wait_requests done\n");
3707 }
3708
3709 /*
3710  * called before mount is ro, and before dentries are torn down.
3711  * (hmm, does this still race with new lookups?)
3712  */
3713 void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc)
3714 {
3715         dout("pre_umount\n");
3716         mdsc->stopping = 1;
3717
3718         lock_unlock_sessions(mdsc);
3719         ceph_flush_dirty_caps(mdsc);
3720         wait_requests(mdsc);
3721
3722         /*
3723          * wait for reply handlers to drop their request refs and
3724          * their inode/dcache refs
3725          */
3726         ceph_msgr_flush();
3727 }
3728
3729 /*
3730  * wait for all write mds requests to flush.
3731  */
3732 static void wait_unsafe_requests(struct ceph_mds_client *mdsc, u64 want_tid)
3733 {
3734         struct ceph_mds_request *req = NULL, *nextreq;
3735         struct rb_node *n;
3736
3737         mutex_lock(&mdsc->mutex);
3738         dout("wait_unsafe_requests want %lld\n", want_tid);
3739 restart:
3740         req = __get_oldest_req(mdsc);
3741         while (req && req->r_tid <= want_tid) {
3742                 /* find next request */
3743                 n = rb_next(&req->r_node);
3744                 if (n)
3745                         nextreq = rb_entry(n, struct ceph_mds_request, r_node);
3746                 else
3747                         nextreq = NULL;
3748                 if (req->r_op != CEPH_MDS_OP_SETFILELOCK &&
3749                     (req->r_op & CEPH_MDS_OP_WRITE)) {
3750                         /* write op */
3751                         ceph_mdsc_get_request(req);
3752                         if (nextreq)
3753                                 ceph_mdsc_get_request(nextreq);
3754                         mutex_unlock(&mdsc->mutex);
3755                         dout("wait_unsafe_requests  wait on %llu (want %llu)\n",
3756                              req->r_tid, want_tid);
3757                         wait_for_completion(&req->r_safe_completion);
3758                         mutex_lock(&mdsc->mutex);
3759                         ceph_mdsc_put_request(req);
3760                         if (!nextreq)
3761                                 break;  /* next dne before, so we're done! */
3762                         if (RB_EMPTY_NODE(&nextreq->r_node)) {
3763                                 /* next request was removed from tree */
3764                                 ceph_mdsc_put_request(nextreq);
3765                                 goto restart;
3766                         }
3767                         ceph_mdsc_put_request(nextreq);  /* won't go away */
3768                 }
3769                 req = nextreq;
3770         }
3771         mutex_unlock(&mdsc->mutex);
3772         dout("wait_unsafe_requests done\n");
3773 }
3774
3775 void ceph_mdsc_sync(struct ceph_mds_client *mdsc)
3776 {
3777         u64 want_tid, want_flush;
3778
3779         if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN)
3780                 return;
3781
3782         dout("sync\n");
3783         mutex_lock(&mdsc->mutex);
3784         want_tid = mdsc->last_tid;
3785         mutex_unlock(&mdsc->mutex);
3786
3787         ceph_flush_dirty_caps(mdsc);
3788         spin_lock(&mdsc->cap_dirty_lock);
3789         want_flush = mdsc->last_cap_flush_tid;
3790         if (!list_empty(&mdsc->cap_flush_list)) {
3791                 struct ceph_cap_flush *cf =
3792                         list_last_entry(&mdsc->cap_flush_list,
3793                                         struct ceph_cap_flush, g_list);
3794                 cf->wake = true;
3795         }
3796         spin_unlock(&mdsc->cap_dirty_lock);
3797
3798         dout("sync want tid %lld flush_seq %lld\n",
3799              want_tid, want_flush);
3800
3801         wait_unsafe_requests(mdsc, want_tid);
3802         wait_caps_flush(mdsc, want_flush);
3803 }
3804
3805 /*
3806  * true if all sessions are closed, or we force unmount
3807  */
3808 static bool done_closing_sessions(struct ceph_mds_client *mdsc, int skipped)
3809 {
3810         if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN)
3811                 return true;
3812         return atomic_read(&mdsc->num_sessions) <= skipped;
3813 }
3814
3815 /*
3816  * called after sb is ro.
3817  */
3818 void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc)
3819 {
3820         struct ceph_options *opts = mdsc->fsc->client->options;
3821         struct ceph_mds_session *session;
3822         int i;
3823         int skipped = 0;
3824
3825         dout("close_sessions\n");
3826
3827         /* close sessions */
3828         mutex_lock(&mdsc->mutex);
3829         for (i = 0; i < mdsc->max_sessions; i++) {
3830                 session = __ceph_lookup_mds_session(mdsc, i);
3831                 if (!session)
3832                         continue;
3833                 mutex_unlock(&mdsc->mutex);
3834                 mutex_lock(&session->s_mutex);
3835                 if (__close_session(mdsc, session) <= 0)
3836                         skipped++;
3837                 mutex_unlock(&session->s_mutex);
3838                 ceph_put_mds_session(session);
3839                 mutex_lock(&mdsc->mutex);
3840         }
3841         mutex_unlock(&mdsc->mutex);
3842
3843         dout("waiting for sessions to close\n");
3844         wait_event_timeout(mdsc->session_close_wq,
3845                            done_closing_sessions(mdsc, skipped),
3846                            ceph_timeout_jiffies(opts->mount_timeout));
3847
3848         /* tear down remaining sessions */
3849         mutex_lock(&mdsc->mutex);
3850         for (i = 0; i < mdsc->max_sessions; i++) {
3851                 if (mdsc->sessions[i]) {
3852                         session = get_session(mdsc->sessions[i]);
3853                         __unregister_session(mdsc, session);
3854                         mutex_unlock(&mdsc->mutex);
3855                         mutex_lock(&session->s_mutex);
3856                         remove_session_caps(session);
3857                         mutex_unlock(&session->s_mutex);
3858                         ceph_put_mds_session(session);
3859                         mutex_lock(&mdsc->mutex);
3860                 }
3861         }
3862         WARN_ON(!list_empty(&mdsc->cap_delay_list));
3863         mutex_unlock(&mdsc->mutex);
3864
3865         ceph_cleanup_empty_realms(mdsc);
3866
3867         cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */
3868
3869         dout("stopped\n");
3870 }
3871
3872 void ceph_mdsc_force_umount(struct ceph_mds_client *mdsc)
3873 {
3874         struct ceph_mds_session *session;
3875         int mds;
3876
3877         dout("force umount\n");
3878
3879         mutex_lock(&mdsc->mutex);
3880         for (mds = 0; mds < mdsc->max_sessions; mds++) {
3881                 session = __ceph_lookup_mds_session(mdsc, mds);
3882                 if (!session)
3883                         continue;
3884                 mutex_unlock(&mdsc->mutex);
3885                 mutex_lock(&session->s_mutex);
3886                 __close_session(mdsc, session);
3887                 if (session->s_state == CEPH_MDS_SESSION_CLOSING) {
3888                         cleanup_session_requests(mdsc, session);
3889                         remove_session_caps(session);
3890                 }
3891                 mutex_unlock(&session->s_mutex);
3892                 ceph_put_mds_session(session);
3893                 mutex_lock(&mdsc->mutex);
3894                 kick_requests(mdsc, mds);
3895         }
3896         __wake_requests(mdsc, &mdsc->waiting_for_map);
3897         mutex_unlock(&mdsc->mutex);
3898 }
3899
3900 static void ceph_mdsc_stop(struct ceph_mds_client *mdsc)
3901 {
3902         dout("stop\n");
3903         cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */
3904         if (mdsc->mdsmap)
3905                 ceph_mdsmap_destroy(mdsc->mdsmap);
3906         kfree(mdsc->sessions);
3907         ceph_caps_finalize(mdsc);
3908         ceph_pool_perm_destroy(mdsc);
3909 }
3910
3911 void ceph_mdsc_destroy(struct ceph_fs_client *fsc)
3912 {
3913         struct ceph_mds_client *mdsc = fsc->mdsc;
3914         dout("mdsc_destroy %p\n", mdsc);
3915
3916         if (!mdsc)
3917                 return;
3918
3919         /* flush out any connection work with references to us */
3920         ceph_msgr_flush();
3921
3922         ceph_mdsc_stop(mdsc);
3923
3924         fsc->mdsc = NULL;
3925         kfree(mdsc);
3926         dout("mdsc_destroy %p done\n", mdsc);
3927 }
3928
3929 void ceph_mdsc_handle_fsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
3930 {
3931         struct ceph_fs_client *fsc = mdsc->fsc;
3932         const char *mds_namespace = fsc->mount_options->mds_namespace;
3933         void *p = msg->front.iov_base;
3934         void *end = p + msg->front.iov_len;
3935         u32 epoch;
3936         u32 map_len;
3937         u32 num_fs;
3938         u32 mount_fscid = (u32)-1;
3939         u8 struct_v, struct_cv;
3940         int err = -EINVAL;
3941
3942         ceph_decode_need(&p, end, sizeof(u32), bad);
3943         epoch = ceph_decode_32(&p);
3944
3945         dout("handle_fsmap epoch %u\n", epoch);
3946
3947         ceph_decode_need(&p, end, 2 + sizeof(u32), bad);
3948         struct_v = ceph_decode_8(&p);
3949         struct_cv = ceph_decode_8(&p);
3950         map_len = ceph_decode_32(&p);
3951
3952         ceph_decode_need(&p, end, sizeof(u32) * 3, bad);
3953         p += sizeof(u32) * 2; /* skip epoch and legacy_client_fscid */
3954
3955         num_fs = ceph_decode_32(&p);
3956         while (num_fs-- > 0) {
3957                 void *info_p, *info_end;
3958                 u32 info_len;
3959                 u8 info_v, info_cv;
3960                 u32 fscid, namelen;
3961
3962                 ceph_decode_need(&p, end, 2 + sizeof(u32), bad);
3963                 info_v = ceph_decode_8(&p);
3964                 info_cv = ceph_decode_8(&p);
3965                 info_len = ceph_decode_32(&p);
3966                 ceph_decode_need(&p, end, info_len, bad);
3967                 info_p = p;
3968                 info_end = p + info_len;
3969                 p = info_end;
3970
3971                 ceph_decode_need(&info_p, info_end, sizeof(u32) * 2, bad);
3972                 fscid = ceph_decode_32(&info_p);
3973                 namelen = ceph_decode_32(&info_p);
3974                 ceph_decode_need(&info_p, info_end, namelen, bad);
3975
3976                 if (mds_namespace &&
3977                     strlen(mds_namespace) == namelen &&
3978                     !strncmp(mds_namespace, (char *)info_p, namelen)) {
3979                         mount_fscid = fscid;
3980                         break;
3981                 }
3982         }
3983
3984         ceph_monc_got_map(&fsc->client->monc, CEPH_SUB_FSMAP, epoch);
3985         if (mount_fscid != (u32)-1) {
3986                 fsc->client->monc.fs_cluster_id = mount_fscid;
3987                 ceph_monc_want_map(&fsc->client->monc, CEPH_SUB_MDSMAP,
3988                                    0, true);
3989                 ceph_monc_renew_subs(&fsc->client->monc);
3990         } else {
3991                 err = -ENOENT;
3992                 goto err_out;
3993         }
3994         return;
3995
3996 bad:
3997         pr_err("error decoding fsmap\n");
3998 err_out:
3999         mutex_lock(&mdsc->mutex);
4000         mdsc->mdsmap_err = err;
4001         __wake_requests(mdsc, &mdsc->waiting_for_map);
4002         mutex_unlock(&mdsc->mutex);
4003 }
4004
4005 /*
4006  * handle mds map update.
4007  */
4008 void ceph_mdsc_handle_mdsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
4009 {
4010         u32 epoch;
4011         u32 maplen;
4012         void *p = msg->front.iov_base;
4013         void *end = p + msg->front.iov_len;
4014         struct ceph_mdsmap *newmap, *oldmap;
4015         struct ceph_fsid fsid;
4016         int err = -EINVAL;
4017
4018         ceph_decode_need(&p, end, sizeof(fsid)+2*sizeof(u32), bad);
4019         ceph_decode_copy(&p, &fsid, sizeof(fsid));
4020         if (ceph_check_fsid(mdsc->fsc->client, &fsid) < 0)
4021                 return;
4022         epoch = ceph_decode_32(&p);
4023         maplen = ceph_decode_32(&p);
4024         dout("handle_map epoch %u len %d\n", epoch, (int)maplen);
4025
4026         /* do we need it? */
4027         mutex_lock(&mdsc->mutex);
4028         if (mdsc->mdsmap && epoch <= mdsc->mdsmap->m_epoch) {
4029                 dout("handle_map epoch %u <= our %u\n",
4030                      epoch, mdsc->mdsmap->m_epoch);
4031                 mutex_unlock(&mdsc->mutex);
4032                 return;
4033         }
4034
4035         newmap = ceph_mdsmap_decode(&p, end);
4036         if (IS_ERR(newmap)) {
4037                 err = PTR_ERR(newmap);
4038                 goto bad_unlock;
4039         }
4040
4041         /* swap into place */
4042         if (mdsc->mdsmap) {
4043                 oldmap = mdsc->mdsmap;
4044                 mdsc->mdsmap = newmap;
4045                 check_new_map(mdsc, newmap, oldmap);
4046                 ceph_mdsmap_destroy(oldmap);
4047         } else {
4048                 mdsc->mdsmap = newmap;  /* first mds map */
4049         }
4050         mdsc->fsc->max_file_size = min((loff_t)mdsc->mdsmap->m_max_file_size,
4051                                         MAX_LFS_FILESIZE);
4052
4053         __wake_requests(mdsc, &mdsc->waiting_for_map);
4054         ceph_monc_got_map(&mdsc->fsc->client->monc, CEPH_SUB_MDSMAP,
4055                           mdsc->mdsmap->m_epoch);
4056
4057         mutex_unlock(&mdsc->mutex);
4058         schedule_delayed(mdsc);
4059         return;
4060
4061 bad_unlock:
4062         mutex_unlock(&mdsc->mutex);
4063 bad:
4064         pr_err("error decoding mdsmap %d\n", err);
4065         return;
4066 }
4067
4068 static struct ceph_connection *con_get(struct ceph_connection *con)
4069 {
4070         struct ceph_mds_session *s = con->private;
4071
4072         if (get_session(s)) {
4073                 dout("mdsc con_get %p ok (%d)\n", s, refcount_read(&s->s_ref));
4074                 return con;
4075         }
4076         dout("mdsc con_get %p FAIL\n", s);
4077         return NULL;
4078 }
4079
4080 static void con_put(struct ceph_connection *con)
4081 {
4082         struct ceph_mds_session *s = con->private;
4083
4084         dout("mdsc con_put %p (%d)\n", s, refcount_read(&s->s_ref) - 1);
4085         ceph_put_mds_session(s);
4086 }
4087
4088 /*
4089  * if the client is unresponsive for long enough, the mds will kill
4090  * the session entirely.
4091  */
4092 static void peer_reset(struct ceph_connection *con)
4093 {
4094         struct ceph_mds_session *s = con->private;
4095         struct ceph_mds_client *mdsc = s->s_mdsc;
4096
4097         pr_warn("mds%d closed our session\n", s->s_mds);
4098         send_mds_reconnect(mdsc, s);
4099 }
4100
4101 static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
4102 {
4103         struct ceph_mds_session *s = con->private;
4104         struct ceph_mds_client *mdsc = s->s_mdsc;
4105         int type = le16_to_cpu(msg->hdr.type);
4106
4107         mutex_lock(&mdsc->mutex);
4108         if (__verify_registered_session(mdsc, s) < 0) {
4109                 mutex_unlock(&mdsc->mutex);
4110                 goto out;
4111         }
4112         mutex_unlock(&mdsc->mutex);
4113
4114         switch (type) {
4115         case CEPH_MSG_MDS_MAP:
4116                 ceph_mdsc_handle_mdsmap(mdsc, msg);
4117                 break;
4118         case CEPH_MSG_FS_MAP_USER:
4119                 ceph_mdsc_handle_fsmap(mdsc, msg);
4120                 break;
4121         case CEPH_MSG_CLIENT_SESSION:
4122                 handle_session(s, msg);
4123                 break;
4124         case CEPH_MSG_CLIENT_REPLY:
4125                 handle_reply(s, msg);
4126                 break;
4127         case CEPH_MSG_CLIENT_REQUEST_FORWARD:
4128                 handle_forward(mdsc, s, msg);
4129                 break;
4130         case CEPH_MSG_CLIENT_CAPS:
4131                 ceph_handle_caps(s, msg);
4132                 break;
4133         case CEPH_MSG_CLIENT_SNAP:
4134                 ceph_handle_snap(mdsc, s, msg);
4135                 break;
4136         case CEPH_MSG_CLIENT_LEASE:
4137                 handle_lease(mdsc, s, msg);
4138                 break;
4139         case CEPH_MSG_CLIENT_QUOTA:
4140                 ceph_handle_quota(mdsc, s, msg);
4141                 break;
4142
4143         default:
4144                 pr_err("received unknown message type %d %s\n", type,
4145                        ceph_msg_type_name(type));
4146         }
4147 out:
4148         ceph_msg_put(msg);
4149 }
4150
4151 /*
4152  * authentication
4153  */
4154
4155 /*
4156  * Note: returned pointer is the address of a structure that's
4157  * managed separately.  Caller must *not* attempt to free it.
4158  */
4159 static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
4160                                         int *proto, int force_new)
4161 {
4162         struct ceph_mds_session *s = con->private;
4163         struct ceph_mds_client *mdsc = s->s_mdsc;
4164         struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
4165         struct ceph_auth_handshake *auth = &s->s_auth;
4166
4167         if (force_new && auth->authorizer) {
4168                 ceph_auth_destroy_authorizer(auth->authorizer);
4169                 auth->authorizer = NULL;
4170         }
4171         if (!auth->authorizer) {
4172                 int ret = ceph_auth_create_authorizer(ac, CEPH_ENTITY_TYPE_MDS,
4173                                                       auth);
4174                 if (ret)
4175                         return ERR_PTR(ret);
4176         } else {
4177                 int ret = ceph_auth_update_authorizer(ac, CEPH_ENTITY_TYPE_MDS,
4178                                                       auth);
4179                 if (ret)
4180                         return ERR_PTR(ret);
4181         }
4182         *proto = ac->protocol;
4183
4184         return auth;
4185 }
4186
4187 static int add_authorizer_challenge(struct ceph_connection *con,
4188                                     void *challenge_buf, int challenge_buf_len)
4189 {
4190         struct ceph_mds_session *s = con->private;
4191         struct ceph_mds_client *mdsc = s->s_mdsc;
4192         struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
4193
4194         return ceph_auth_add_authorizer_challenge(ac, s->s_auth.authorizer,
4195                                             challenge_buf, challenge_buf_len);
4196 }
4197
4198 static int verify_authorizer_reply(struct ceph_connection *con)
4199 {
4200         struct ceph_mds_session *s = con->private;
4201         struct ceph_mds_client *mdsc = s->s_mdsc;
4202         struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
4203
4204         return ceph_auth_verify_authorizer_reply(ac, s->s_auth.authorizer);
4205 }
4206
4207 static int invalidate_authorizer(struct ceph_connection *con)
4208 {
4209         struct ceph_mds_session *s = con->private;
4210         struct ceph_mds_client *mdsc = s->s_mdsc;
4211         struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
4212
4213         ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_MDS);
4214
4215         return ceph_monc_validate_auth(&mdsc->fsc->client->monc);
4216 }
4217
4218 static struct ceph_msg *mds_alloc_msg(struct ceph_connection *con,
4219                                 struct ceph_msg_header *hdr, int *skip)
4220 {
4221         struct ceph_msg *msg;
4222         int type = (int) le16_to_cpu(hdr->type);
4223         int front_len = (int) le32_to_cpu(hdr->front_len);
4224
4225         if (con->in_msg)
4226                 return con->in_msg;
4227
4228         *skip = 0;
4229         msg = ceph_msg_new(type, front_len, GFP_NOFS, false);
4230         if (!msg) {
4231                 pr_err("unable to allocate msg type %d len %d\n",
4232                        type, front_len);
4233                 return NULL;
4234         }
4235
4236         return msg;
4237 }
4238
4239 static int mds_sign_message(struct ceph_msg *msg)
4240 {
4241        struct ceph_mds_session *s = msg->con->private;
4242        struct ceph_auth_handshake *auth = &s->s_auth;
4243
4244        return ceph_auth_sign_message(auth, msg);
4245 }
4246
4247 static int mds_check_message_signature(struct ceph_msg *msg)
4248 {
4249        struct ceph_mds_session *s = msg->con->private;
4250        struct ceph_auth_handshake *auth = &s->s_auth;
4251
4252        return ceph_auth_check_message_signature(auth, msg);
4253 }
4254
4255 static const struct ceph_connection_operations mds_con_ops = {
4256         .get = con_get,
4257         .put = con_put,
4258         .dispatch = dispatch,
4259         .get_authorizer = get_authorizer,
4260         .add_authorizer_challenge = add_authorizer_challenge,
4261         .verify_authorizer_reply = verify_authorizer_reply,
4262         .invalidate_authorizer = invalidate_authorizer,
4263         .peer_reset = peer_reset,
4264         .alloc_msg = mds_alloc_msg,
4265         .sign_message = mds_sign_message,
4266         .check_message_signature = mds_check_message_signature,
4267 };
4268
4269 /* eof */