net/ceph/osd_client.c
1 // SPDX-License-Identifier: GPL-2.0
2
3 #include <linux/ceph/ceph_debug.h>
4
5 #include <linux/module.h>
6 #include <linux/err.h>
7 #include <linux/highmem.h>
8 #include <linux/mm.h>
9 #include <linux/pagemap.h>
10 #include <linux/slab.h>
11 #include <linux/uaccess.h>
12 #ifdef CONFIG_BLOCK
13 #include <linux/bio.h>
14 #endif
15
16 #include <linux/ceph/ceph_features.h>
17 #include <linux/ceph/libceph.h>
18 #include <linux/ceph/osd_client.h>
19 #include <linux/ceph/messenger.h>
20 #include <linux/ceph/decode.h>
21 #include <linux/ceph/auth.h>
22 #include <linux/ceph/pagelist.h>
23 #include <linux/ceph/striper.h>
24
25 #define OSD_OPREPLY_FRONT_LEN   512
26
27 static struct kmem_cache        *ceph_osd_request_cache;
28
29 static const struct ceph_connection_operations osd_con_ops;
30
31 /*
32  * Implement client access to distributed object storage cluster.
33  *
34  * All data objects are stored within a cluster/cloud of OSDs, or
35  * "object storage devices."  (Note that Ceph OSDs have _nothing_ to
36  * do with the T10 OSD extensions to SCSI.)  Ceph OSDs are simply
37  * remote daemons serving up and coordinating consistent and safe
38  * access to storage.
39  *
40  * Cluster membership and the mapping of data objects onto storage devices
41  * are described by the osd map.
42  *
43  * We keep track of pending OSD requests (read, write), resubmit
44  * requests to different OSDs when the cluster topology/data layout
45  * change, or retry the affected requests when the communications
46  * channel with an OSD is reset.
47  */
48
49 static void link_request(struct ceph_osd *osd, struct ceph_osd_request *req);
50 static void unlink_request(struct ceph_osd *osd, struct ceph_osd_request *req);
51 static void link_linger(struct ceph_osd *osd,
52                         struct ceph_osd_linger_request *lreq);
53 static void unlink_linger(struct ceph_osd *osd,
54                           struct ceph_osd_linger_request *lreq);
55 static void clear_backoffs(struct ceph_osd *osd);
56
57 #if 1
58 static inline bool rwsem_is_wrlocked(struct rw_semaphore *sem)
59 {
60         bool wrlocked = true;
61
62         if (unlikely(down_read_trylock(sem))) {
63                 wrlocked = false;
64                 up_read(sem);
65         }
66
67         return wrlocked;
68 }
69 static inline void verify_osdc_locked(struct ceph_osd_client *osdc)
70 {
71         WARN_ON(!rwsem_is_locked(&osdc->lock));
72 }
73 static inline void verify_osdc_wrlocked(struct ceph_osd_client *osdc)
74 {
75         WARN_ON(!rwsem_is_wrlocked(&osdc->lock));
76 }
77 static inline void verify_osd_locked(struct ceph_osd *osd)
78 {
79         struct ceph_osd_client *osdc = osd->o_osdc;
80
81         WARN_ON(!(mutex_is_locked(&osd->lock) &&
82                   rwsem_is_locked(&osdc->lock)) &&
83                 !rwsem_is_wrlocked(&osdc->lock));
84 }
85 static inline void verify_lreq_locked(struct ceph_osd_linger_request *lreq)
86 {
87         WARN_ON(!mutex_is_locked(&lreq->lock));
88 }
89 #else
90 static inline void verify_osdc_locked(struct ceph_osd_client *osdc) { }
91 static inline void verify_osdc_wrlocked(struct ceph_osd_client *osdc) { }
92 static inline void verify_osd_locked(struct ceph_osd *osd) { }
93 static inline void verify_lreq_locked(struct ceph_osd_linger_request *lreq) { }
94 #endif
95
96 /*
97  * calculate the mapping of a file extent onto an object, shortening the
98  * extent as necessary if it crosses an object boundary.
99  *
100  * returns the object number, offset and length within that object via
101  * @objnum, @objoff and @objlen; *plen is trimmed to the mapped length.
102  */
103 static int calc_layout(struct ceph_file_layout *layout, u64 off, u64 *plen,
104                         u64 *objnum, u64 *objoff, u64 *objlen)
105 {
106         u64 orig_len = *plen;
107         u32 xlen;
108
109         /* object extent? */
110         ceph_calc_file_object_mapping(layout, off, orig_len, objnum,
111                                           objoff, &xlen);
112         *objlen = xlen;
113         if (*objlen < orig_len) {
114                 *plen = *objlen;
115                 dout(" skipping last %llu, final file extent %llu~%llu\n",
116                      orig_len - *plen, off, *plen);
117         }
118
119         dout("calc_layout objnum=%llx %llu~%llu\n", *objnum, *objoff, *objlen);
120         return 0;
121 }
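
/*
 * Worked example (hypothetical layout: 4 MiB objects, stripe_unit ==
 * object_size, stripe_count == 1): an extent off=0x380000, *plen=0x40000
 * fits in object 0, so *objnum=0, *objoff=0x380000, *objlen=0x40000.
 * An extent off=0x3f0000, *plen=0x40000 crosses the 4 MiB boundary, so
 * *objlen is cut to 0x10000 and *plen is trimmed to match; the caller
 * issues a separate request for the remainder starting at 0x400000.
 */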
122
123 static void ceph_osd_data_init(struct ceph_osd_data *osd_data)
124 {
125         memset(osd_data, 0, sizeof (*osd_data));
126         osd_data->type = CEPH_OSD_DATA_TYPE_NONE;
127 }
128
129 static void ceph_osd_data_pages_init(struct ceph_osd_data *osd_data,
130                         struct page **pages, u64 length, u32 alignment,
131                         bool pages_from_pool, bool own_pages)
132 {
133         osd_data->type = CEPH_OSD_DATA_TYPE_PAGES;
134         osd_data->pages = pages;
135         osd_data->length = length;
136         osd_data->alignment = alignment;
137         osd_data->pages_from_pool = pages_from_pool;
138         osd_data->own_pages = own_pages;
139 }
140
141 static void ceph_osd_data_pagelist_init(struct ceph_osd_data *osd_data,
142                         struct ceph_pagelist *pagelist)
143 {
144         osd_data->type = CEPH_OSD_DATA_TYPE_PAGELIST;
145         osd_data->pagelist = pagelist;
146 }
147
148 #ifdef CONFIG_BLOCK
149 static void ceph_osd_data_bio_init(struct ceph_osd_data *osd_data,
150                                    struct ceph_bio_iter *bio_pos,
151                                    u32 bio_length)
152 {
153         osd_data->type = CEPH_OSD_DATA_TYPE_BIO;
154         osd_data->bio_pos = *bio_pos;
155         osd_data->bio_length = bio_length;
156 }
157 #endif /* CONFIG_BLOCK */
158
159 static void ceph_osd_data_bvecs_init(struct ceph_osd_data *osd_data,
160                                      struct ceph_bvec_iter *bvec_pos,
161                                      u32 num_bvecs)
162 {
163         osd_data->type = CEPH_OSD_DATA_TYPE_BVECS;
164         osd_data->bvec_pos = *bvec_pos;
165         osd_data->num_bvecs = num_bvecs;
166 }
167
168 #define osd_req_op_data(oreq, whch, typ, fld)                           \
169 ({                                                                      \
170         struct ceph_osd_request *__oreq = (oreq);                       \
171         unsigned int __whch = (whch);                                   \
172         BUG_ON(__whch >= __oreq->r_num_ops);                            \
173         &__oreq->r_ops[__whch].typ.fld;                                 \
174 })
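
/*
 * For example, osd_req_op_data(req, 0, extent, osd_data) bounds-checks
 * the op index against r_num_ops and evaluates to
 * &req->r_ops[0].extent.osd_data.  The osd_req_op_*_osd_data_*() helpers
 * below are thin wrappers pairing this lookup with one of the
 * ceph_osd_data_*_init() setters above.
 */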
175
176 static struct ceph_osd_data *
177 osd_req_op_raw_data_in(struct ceph_osd_request *osd_req, unsigned int which)
178 {
179         BUG_ON(which >= osd_req->r_num_ops);
180
181         return &osd_req->r_ops[which].raw_data_in;
182 }
183
184 struct ceph_osd_data *
185 osd_req_op_extent_osd_data(struct ceph_osd_request *osd_req,
186                         unsigned int which)
187 {
188         return osd_req_op_data(osd_req, which, extent, osd_data);
189 }
190 EXPORT_SYMBOL(osd_req_op_extent_osd_data);
191
192 void osd_req_op_raw_data_in_pages(struct ceph_osd_request *osd_req,
193                         unsigned int which, struct page **pages,
194                         u64 length, u32 alignment,
195                         bool pages_from_pool, bool own_pages)
196 {
197         struct ceph_osd_data *osd_data;
198
199         osd_data = osd_req_op_raw_data_in(osd_req, which);
200         ceph_osd_data_pages_init(osd_data, pages, length, alignment,
201                                 pages_from_pool, own_pages);
202 }
203 EXPORT_SYMBOL(osd_req_op_raw_data_in_pages);
204
205 void osd_req_op_extent_osd_data_pages(struct ceph_osd_request *osd_req,
206                         unsigned int which, struct page **pages,
207                         u64 length, u32 alignment,
208                         bool pages_from_pool, bool own_pages)
209 {
210         struct ceph_osd_data *osd_data;
211
212         osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
213         ceph_osd_data_pages_init(osd_data, pages, length, alignment,
214                                 pages_from_pool, own_pages);
215 }
216 EXPORT_SYMBOL(osd_req_op_extent_osd_data_pages);
217
218 void osd_req_op_extent_osd_data_pagelist(struct ceph_osd_request *osd_req,
219                         unsigned int which, struct ceph_pagelist *pagelist)
220 {
221         struct ceph_osd_data *osd_data;
222
223         osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
224         ceph_osd_data_pagelist_init(osd_data, pagelist);
225 }
226 EXPORT_SYMBOL(osd_req_op_extent_osd_data_pagelist);
227
228 #ifdef CONFIG_BLOCK
229 void osd_req_op_extent_osd_data_bio(struct ceph_osd_request *osd_req,
230                                     unsigned int which,
231                                     struct ceph_bio_iter *bio_pos,
232                                     u32 bio_length)
233 {
234         struct ceph_osd_data *osd_data;
235
236         osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
237         ceph_osd_data_bio_init(osd_data, bio_pos, bio_length);
238 }
239 EXPORT_SYMBOL(osd_req_op_extent_osd_data_bio);
240 #endif /* CONFIG_BLOCK */
241
242 void osd_req_op_extent_osd_data_bvecs(struct ceph_osd_request *osd_req,
243                                       unsigned int which,
244                                       struct bio_vec *bvecs, u32 num_bvecs,
245                                       u32 bytes)
246 {
247         struct ceph_osd_data *osd_data;
248         struct ceph_bvec_iter it = {
249                 .bvecs = bvecs,
250                 .iter = { .bi_size = bytes },
251         };
252
253         osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
254         ceph_osd_data_bvecs_init(osd_data, &it, num_bvecs);
255 }
256 EXPORT_SYMBOL(osd_req_op_extent_osd_data_bvecs);
257
258 void osd_req_op_extent_osd_data_bvec_pos(struct ceph_osd_request *osd_req,
259                                          unsigned int which,
260                                          struct ceph_bvec_iter *bvec_pos)
261 {
262         struct ceph_osd_data *osd_data;
263
264         osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
265         ceph_osd_data_bvecs_init(osd_data, bvec_pos, 0);
266 }
267 EXPORT_SYMBOL(osd_req_op_extent_osd_data_bvec_pos);
268
269 static void osd_req_op_cls_request_info_pagelist(
270                         struct ceph_osd_request *osd_req,
271                         unsigned int which, struct ceph_pagelist *pagelist)
272 {
273         struct ceph_osd_data *osd_data;
274
275         osd_data = osd_req_op_data(osd_req, which, cls, request_info);
276         ceph_osd_data_pagelist_init(osd_data, pagelist);
277 }
278
279 void osd_req_op_cls_request_data_pagelist(
280                         struct ceph_osd_request *osd_req,
281                         unsigned int which, struct ceph_pagelist *pagelist)
282 {
283         struct ceph_osd_data *osd_data;
284
285         osd_data = osd_req_op_data(osd_req, which, cls, request_data);
286         ceph_osd_data_pagelist_init(osd_data, pagelist);
287         osd_req->r_ops[which].cls.indata_len += pagelist->length;
288         osd_req->r_ops[which].indata_len += pagelist->length;
289 }
290 EXPORT_SYMBOL(osd_req_op_cls_request_data_pagelist);
291
292 void osd_req_op_cls_request_data_pages(struct ceph_osd_request *osd_req,
293                         unsigned int which, struct page **pages, u64 length,
294                         u32 alignment, bool pages_from_pool, bool own_pages)
295 {
296         struct ceph_osd_data *osd_data;
297
298         osd_data = osd_req_op_data(osd_req, which, cls, request_data);
299         ceph_osd_data_pages_init(osd_data, pages, length, alignment,
300                                 pages_from_pool, own_pages);
301         osd_req->r_ops[which].cls.indata_len += length;
302         osd_req->r_ops[which].indata_len += length;
303 }
304 EXPORT_SYMBOL(osd_req_op_cls_request_data_pages);
305
306 void osd_req_op_cls_request_data_bvecs(struct ceph_osd_request *osd_req,
307                                        unsigned int which,
308                                        struct bio_vec *bvecs, u32 num_bvecs,
309                                        u32 bytes)
310 {
311         struct ceph_osd_data *osd_data;
312         struct ceph_bvec_iter it = {
313                 .bvecs = bvecs,
314                 .iter = { .bi_size = bytes },
315         };
316
317         osd_data = osd_req_op_data(osd_req, which, cls, request_data);
318         ceph_osd_data_bvecs_init(osd_data, &it, num_bvecs);
319         osd_req->r_ops[which].cls.indata_len += bytes;
320         osd_req->r_ops[which].indata_len += bytes;
321 }
322 EXPORT_SYMBOL(osd_req_op_cls_request_data_bvecs);
323
324 void osd_req_op_cls_response_data_pages(struct ceph_osd_request *osd_req,
325                         unsigned int which, struct page **pages, u64 length,
326                         u32 alignment, bool pages_from_pool, bool own_pages)
327 {
328         struct ceph_osd_data *osd_data;
329
330         osd_data = osd_req_op_data(osd_req, which, cls, response_data);
331         ceph_osd_data_pages_init(osd_data, pages, length, alignment,
332                                 pages_from_pool, own_pages);
333 }
334 EXPORT_SYMBOL(osd_req_op_cls_response_data_pages);
335
336 static u64 ceph_osd_data_length(struct ceph_osd_data *osd_data)
337 {
338         switch (osd_data->type) {
339         case CEPH_OSD_DATA_TYPE_NONE:
340                 return 0;
341         case CEPH_OSD_DATA_TYPE_PAGES:
342                 return osd_data->length;
343         case CEPH_OSD_DATA_TYPE_PAGELIST:
344                 return (u64)osd_data->pagelist->length;
345 #ifdef CONFIG_BLOCK
346         case CEPH_OSD_DATA_TYPE_BIO:
347                 return (u64)osd_data->bio_length;
348 #endif /* CONFIG_BLOCK */
349         case CEPH_OSD_DATA_TYPE_BVECS:
350                 return osd_data->bvec_pos.iter.bi_size;
351         default:
352                 WARN(true, "unrecognized data type %d\n", (int)osd_data->type);
353                 return 0;
354         }
355 }
356
357 static void ceph_osd_data_release(struct ceph_osd_data *osd_data)
358 {
359         if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES && osd_data->own_pages) {
360                 int num_pages;
361
362                 num_pages = calc_pages_for((u64)osd_data->alignment,
363                                                 (u64)osd_data->length);
364                 ceph_release_page_vector(osd_data->pages, num_pages);
365         }
366         ceph_osd_data_init(osd_data);
367 }
368
369 static void osd_req_op_data_release(struct ceph_osd_request *osd_req,
370                         unsigned int which)
371 {
372         struct ceph_osd_req_op *op;
373
374         BUG_ON(which >= osd_req->r_num_ops);
375         op = &osd_req->r_ops[which];
376
377         switch (op->op) {
378         case CEPH_OSD_OP_READ:
379         case CEPH_OSD_OP_WRITE:
380         case CEPH_OSD_OP_WRITEFULL:
381                 ceph_osd_data_release(&op->extent.osd_data);
382                 break;
383         case CEPH_OSD_OP_CALL:
384                 ceph_osd_data_release(&op->cls.request_info);
385                 ceph_osd_data_release(&op->cls.request_data);
386                 ceph_osd_data_release(&op->cls.response_data);
387                 break;
388         case CEPH_OSD_OP_SETXATTR:
389         case CEPH_OSD_OP_CMPXATTR:
390                 ceph_osd_data_release(&op->xattr.osd_data);
391                 break;
392         case CEPH_OSD_OP_STAT:
393                 ceph_osd_data_release(&op->raw_data_in);
394                 break;
395         case CEPH_OSD_OP_NOTIFY_ACK:
396                 ceph_osd_data_release(&op->notify_ack.request_data);
397                 break;
398         case CEPH_OSD_OP_NOTIFY:
399                 ceph_osd_data_release(&op->notify.request_data);
400                 ceph_osd_data_release(&op->notify.response_data);
401                 break;
402         case CEPH_OSD_OP_LIST_WATCHERS:
403                 ceph_osd_data_release(&op->list_watchers.response_data);
404                 break;
405         default:
406                 break;
407         }
408 }
409
410 /*
411  * Assumes @t is zero-initialized.
412  */
413 static void target_init(struct ceph_osd_request_target *t)
414 {
415         ceph_oid_init(&t->base_oid);
416         ceph_oloc_init(&t->base_oloc);
417         ceph_oid_init(&t->target_oid);
418         ceph_oloc_init(&t->target_oloc);
419
420         ceph_osds_init(&t->acting);
421         ceph_osds_init(&t->up);
422         t->size = -1;
423         t->min_size = -1;
424
425         t->osd = CEPH_HOMELESS_OSD;
426 }
427
428 static void target_copy(struct ceph_osd_request_target *dest,
429                         const struct ceph_osd_request_target *src)
430 {
431         ceph_oid_copy(&dest->base_oid, &src->base_oid);
432         ceph_oloc_copy(&dest->base_oloc, &src->base_oloc);
433         ceph_oid_copy(&dest->target_oid, &src->target_oid);
434         ceph_oloc_copy(&dest->target_oloc, &src->target_oloc);
435
436         dest->pgid = src->pgid; /* struct */
437         dest->spgid = src->spgid; /* struct */
438         dest->pg_num = src->pg_num;
439         dest->pg_num_mask = src->pg_num_mask;
440         ceph_osds_copy(&dest->acting, &src->acting);
441         ceph_osds_copy(&dest->up, &src->up);
442         dest->size = src->size;
443         dest->min_size = src->min_size;
444         dest->sort_bitwise = src->sort_bitwise;
445
446         dest->flags = src->flags;
447         dest->paused = src->paused;
448
449         dest->epoch = src->epoch;
450         dest->last_force_resend = src->last_force_resend;
451
452         dest->osd = src->osd;
453 }
454
455 static void target_destroy(struct ceph_osd_request_target *t)
456 {
457         ceph_oid_destroy(&t->base_oid);
458         ceph_oloc_destroy(&t->base_oloc);
459         ceph_oid_destroy(&t->target_oid);
460         ceph_oloc_destroy(&t->target_oloc);
461 }
462
463 /*
464  * requests
465  */
466 static void request_release_checks(struct ceph_osd_request *req)
467 {
468         WARN_ON(!RB_EMPTY_NODE(&req->r_node));
469         WARN_ON(!RB_EMPTY_NODE(&req->r_mc_node));
470         WARN_ON(!list_empty(&req->r_unsafe_item));
471         WARN_ON(req->r_osd);
472 }
473
474 static void ceph_osdc_release_request(struct kref *kref)
475 {
476         struct ceph_osd_request *req = container_of(kref,
477                                             struct ceph_osd_request, r_kref);
478         unsigned int which;
479
480         dout("%s %p (r_request %p r_reply %p)\n", __func__, req,
481              req->r_request, req->r_reply);
482         request_release_checks(req);
483
484         if (req->r_request)
485                 ceph_msg_put(req->r_request);
486         if (req->r_reply)
487                 ceph_msg_put(req->r_reply);
488
489         for (which = 0; which < req->r_num_ops; which++)
490                 osd_req_op_data_release(req, which);
491
492         target_destroy(&req->r_t);
493         ceph_put_snap_context(req->r_snapc);
494
495         if (req->r_mempool)
496                 mempool_free(req, req->r_osdc->req_mempool);
497         else if (req->r_num_ops <= CEPH_OSD_SLAB_OPS)
498                 kmem_cache_free(ceph_osd_request_cache, req);
499         else
500                 kfree(req);
501 }
502
503 void ceph_osdc_get_request(struct ceph_osd_request *req)
504 {
505         dout("%s %p (was %d)\n", __func__, req,
506              kref_read(&req->r_kref));
507         kref_get(&req->r_kref);
508 }
509 EXPORT_SYMBOL(ceph_osdc_get_request);
510
511 void ceph_osdc_put_request(struct ceph_osd_request *req)
512 {
513         if (req) {
514                 dout("%s %p (was %d)\n", __func__, req,
515                      kref_read(&req->r_kref));
516                 kref_put(&req->r_kref, ceph_osdc_release_request);
517         }
518 }
519 EXPORT_SYMBOL(ceph_osdc_put_request);
520
521 static void request_init(struct ceph_osd_request *req)
522 {
523         /* req only, each op is zeroed in _osd_req_op_init() */
524         memset(req, 0, sizeof(*req));
525
526         kref_init(&req->r_kref);
527         init_completion(&req->r_completion);
528         RB_CLEAR_NODE(&req->r_node);
529         RB_CLEAR_NODE(&req->r_mc_node);
530         INIT_LIST_HEAD(&req->r_unsafe_item);
531
532         target_init(&req->r_t);
533 }
534
535 /*
536  * This is ugly, but it allows us to reuse linger registration and ping
537  * requests, keeping the structure of the code around send_linger{_ping}()
538  * reasonable.  Setting up a min_nr=2 mempool for each linger request
539  * and dealing with copying ops (this blasts req only, watch op remains
540  * intact) isn't any better.
541  */
542 static void request_reinit(struct ceph_osd_request *req)
543 {
544         struct ceph_osd_client *osdc = req->r_osdc;
545         bool mempool = req->r_mempool;
546         unsigned int num_ops = req->r_num_ops;
547         u64 snapid = req->r_snapid;
548         struct ceph_snap_context *snapc = req->r_snapc;
549         bool linger = req->r_linger;
550         struct ceph_msg *request_msg = req->r_request;
551         struct ceph_msg *reply_msg = req->r_reply;
552
553         dout("%s req %p\n", __func__, req);
554         WARN_ON(kref_read(&req->r_kref) != 1);
555         request_release_checks(req);
556
557         WARN_ON(kref_read(&request_msg->kref) != 1);
558         WARN_ON(kref_read(&reply_msg->kref) != 1);
559         target_destroy(&req->r_t);
560
561         request_init(req);
562         req->r_osdc = osdc;
563         req->r_mempool = mempool;
564         req->r_num_ops = num_ops;
565         req->r_snapid = snapid;
566         req->r_snapc = snapc;
567         req->r_linger = linger;
568         req->r_request = request_msg;
569         req->r_reply = reply_msg;
570 }
571
572 struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
573                                                struct ceph_snap_context *snapc,
574                                                unsigned int num_ops,
575                                                bool use_mempool,
576                                                gfp_t gfp_flags)
577 {
578         struct ceph_osd_request *req;
579
580         if (use_mempool) {
581                 BUG_ON(num_ops > CEPH_OSD_SLAB_OPS);
582                 req = mempool_alloc(osdc->req_mempool, gfp_flags);
583         } else if (num_ops <= CEPH_OSD_SLAB_OPS) {
584                 req = kmem_cache_alloc(ceph_osd_request_cache, gfp_flags);
585         } else {
586                 BUG_ON(num_ops > CEPH_OSD_MAX_OPS);
587                 req = kmalloc(struct_size(req, r_ops, num_ops), gfp_flags);
588         }
589         if (unlikely(!req))
590                 return NULL;
591
592         request_init(req);
593         req->r_osdc = osdc;
594         req->r_mempool = use_mempool;
595         req->r_num_ops = num_ops;
596         req->r_snapid = CEPH_NOSNAP;
597         req->r_snapc = ceph_get_snap_context(snapc);
598
599         dout("%s req %p\n", __func__, req);
600         return req;
601 }
602 EXPORT_SYMBOL(ceph_osdc_alloc_request);
603
604 static int ceph_oloc_encoding_size(const struct ceph_object_locator *oloc)
605 {
606         return 8 + 4 + 4 + 4 + (oloc->pool_ns ? oloc->pool_ns->len : 0);
607 }
608
609 int ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp)
610 {
611         struct ceph_osd_client *osdc = req->r_osdc;
612         struct ceph_msg *msg;
613         int msg_size;
614
615         WARN_ON(ceph_oid_empty(&req->r_base_oid));
616         WARN_ON(ceph_oloc_empty(&req->r_base_oloc));
617
618         /* create request message */
619         msg_size = CEPH_ENCODING_START_BLK_LEN +
620                         CEPH_PGID_ENCODING_LEN + 1; /* spgid */
621         msg_size += 4 + 4 + 4; /* hash, osdmap_epoch, flags */
622         msg_size += CEPH_ENCODING_START_BLK_LEN +
623                         sizeof(struct ceph_osd_reqid); /* reqid */
624         msg_size += sizeof(struct ceph_blkin_trace_info); /* trace */
625         msg_size += 4 + sizeof(struct ceph_timespec); /* client_inc, mtime */
626         msg_size += CEPH_ENCODING_START_BLK_LEN +
627                         ceph_oloc_encoding_size(&req->r_base_oloc); /* oloc */
628         msg_size += 4 + req->r_base_oid.name_len; /* oid */
629         msg_size += 2 + req->r_num_ops * sizeof(struct ceph_osd_op);
630         msg_size += 8; /* snapid */
631         msg_size += 8; /* snap_seq */
632         msg_size += 4 + 8 * (req->r_snapc ? req->r_snapc->num_snaps : 0);
633         msg_size += 4 + 8; /* retry_attempt, features */
634
635         if (req->r_mempool)
636                 msg = ceph_msgpool_get(&osdc->msgpool_op, 0);
637         else
638                 msg = ceph_msg_new(CEPH_MSG_OSD_OP, msg_size, gfp, true);
639         if (!msg)
640                 return -ENOMEM;
641
642         memset(msg->front.iov_base, 0, msg->front.iov_len);
643         req->r_request = msg;
644
645         /* create reply message */
646         msg_size = OSD_OPREPLY_FRONT_LEN;
647         msg_size += req->r_base_oid.name_len;
648         msg_size += req->r_num_ops * sizeof(struct ceph_osd_op);
649
650         if (req->r_mempool)
651                 msg = ceph_msgpool_get(&osdc->msgpool_op_reply, 0);
652         else
653                 msg = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, msg_size, gfp, true);
654         if (!msg)
655                 return -ENOMEM;
656
657         req->r_reply = msg;
658
659         return 0;
660 }
661 EXPORT_SYMBOL(ceph_osdc_alloc_messages);
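
/*
 * Minimal usage sketch (hypothetical pool id and object name, not part of
 * any particular caller): build a one-op request by hand, the way callers
 * such as rbd do when ceph_osdc_new_request() doesn't fit.  The base oid
 * and oloc must be set before ceph_osdc_alloc_messages(), which sizes the
 * request message from them.
 */
#if 0	/* illustrative sketch only */
static struct ceph_osd_request *example_stat_request(struct ceph_osd_client *osdc)
{
	struct ceph_osd_request *req;
	int ret;

	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
	if (!req)
		return ERR_PTR(-ENOMEM);

	req->r_flags = CEPH_OSD_FLAG_READ;
	req->r_base_oloc.pool = 0;			/* hypothetical pool id */
	ceph_oid_printf(&req->r_base_oid, "%s", "example_object");
	osd_req_op_init(req, 0, CEPH_OSD_OP_STAT, 0);

	ret = ceph_osdc_alloc_messages(req, GFP_NOIO);	/* needs oid/oloc set */
	if (ret) {
		ceph_osdc_put_request(req);
		return ERR_PTR(ret);
	}
	return req;
}
#endif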
662
663 static bool osd_req_opcode_valid(u16 opcode)
664 {
665         switch (opcode) {
666 #define GENERATE_CASE(op, opcode, str)  case CEPH_OSD_OP_##op: return true;
667 __CEPH_FORALL_OSD_OPS(GENERATE_CASE)
668 #undef GENERATE_CASE
669         default:
670                 return false;
671         }
672 }
673
674 /*
675  * This is an osd op init function for opcodes that have no data or
676  * other information associated with them.  It also serves as a
677  * common init routine for all the other init functions, below.
678  */
679 static struct ceph_osd_req_op *
680 _osd_req_op_init(struct ceph_osd_request *osd_req, unsigned int which,
681                  u16 opcode, u32 flags)
682 {
683         struct ceph_osd_req_op *op;
684
685         BUG_ON(which >= osd_req->r_num_ops);
686         BUG_ON(!osd_req_opcode_valid(opcode));
687
688         op = &osd_req->r_ops[which];
689         memset(op, 0, sizeof (*op));
690         op->op = opcode;
691         op->flags = flags;
692
693         return op;
694 }
695
696 void osd_req_op_init(struct ceph_osd_request *osd_req,
697                      unsigned int which, u16 opcode, u32 flags)
698 {
699         (void)_osd_req_op_init(osd_req, which, opcode, flags);
700 }
701 EXPORT_SYMBOL(osd_req_op_init);
702
703 void osd_req_op_extent_init(struct ceph_osd_request *osd_req,
704                                 unsigned int which, u16 opcode,
705                                 u64 offset, u64 length,
706                                 u64 truncate_size, u32 truncate_seq)
707 {
708         struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
709                                                       opcode, 0);
710         size_t payload_len = 0;
711
712         BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
713                opcode != CEPH_OSD_OP_WRITEFULL && opcode != CEPH_OSD_OP_ZERO &&
714                opcode != CEPH_OSD_OP_TRUNCATE);
715
716         op->extent.offset = offset;
717         op->extent.length = length;
718         op->extent.truncate_size = truncate_size;
719         op->extent.truncate_seq = truncate_seq;
720         if (opcode == CEPH_OSD_OP_WRITE || opcode == CEPH_OSD_OP_WRITEFULL)
721                 payload_len += length;
722
723         op->indata_len = payload_len;
724 }
725 EXPORT_SYMBOL(osd_req_op_extent_init);
726
727 void osd_req_op_extent_update(struct ceph_osd_request *osd_req,
728                                 unsigned int which, u64 length)
729 {
730         struct ceph_osd_req_op *op;
731         u64 previous;
732
733         BUG_ON(which >= osd_req->r_num_ops);
734         op = &osd_req->r_ops[which];
735         previous = op->extent.length;
736
737         if (length == previous)
738                 return;         /* Nothing to do */
739         BUG_ON(length > previous);
740
741         op->extent.length = length;
742         if (op->op == CEPH_OSD_OP_WRITE || op->op == CEPH_OSD_OP_WRITEFULL)
743                 op->indata_len -= previous - length;
744 }
745 EXPORT_SYMBOL(osd_req_op_extent_update);
746
747 void osd_req_op_extent_dup_last(struct ceph_osd_request *osd_req,
748                                 unsigned int which, u64 offset_inc)
749 {
750         struct ceph_osd_req_op *op, *prev_op;
751
752         BUG_ON(which + 1 >= osd_req->r_num_ops);
753
754         prev_op = &osd_req->r_ops[which];
755         op = _osd_req_op_init(osd_req, which + 1, prev_op->op, prev_op->flags);
756         /* dup previous one */
757         op->indata_len = prev_op->indata_len;
758         op->outdata_len = prev_op->outdata_len;
759         op->extent = prev_op->extent;
760         /* adjust offset */
761         op->extent.offset += offset_inc;
762         op->extent.length -= offset_inc;
763
764         if (op->op == CEPH_OSD_OP_WRITE || op->op == CEPH_OSD_OP_WRITEFULL)
765                 op->indata_len -= offset_inc;
766 }
767 EXPORT_SYMBOL(osd_req_op_extent_dup_last);
768
769 int osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which,
770                         u16 opcode, const char *class, const char *method)
771 {
772         struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
773                                                       opcode, 0);
774         struct ceph_pagelist *pagelist;
775         size_t payload_len = 0;
776         size_t size;
777
778         BUG_ON(opcode != CEPH_OSD_OP_CALL);
779
780         pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
781         if (!pagelist)
782                 return -ENOMEM;
783
784         ceph_pagelist_init(pagelist);
785
786         op->cls.class_name = class;
787         size = strlen(class);
788         BUG_ON(size > (size_t) U8_MAX);
789         op->cls.class_len = size;
790         ceph_pagelist_append(pagelist, class, size);
791         payload_len += size;
792
793         op->cls.method_name = method;
794         size = strlen(method);
795         BUG_ON(size > (size_t) U8_MAX);
796         op->cls.method_len = size;
797         ceph_pagelist_append(pagelist, method, size);
798         payload_len += size;
799
800         osd_req_op_cls_request_info_pagelist(osd_req, which, pagelist);
801
802         op->indata_len = payload_len;
803         return 0;
804 }
805 EXPORT_SYMBOL(osd_req_op_cls_init);
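
/*
 * Usage sketch (hypothetical class/method names): a CALL op carries the
 * class and method strings in request_info, optional input in
 * request_data, and returns its output in response_data.
 */
#if 0	/* illustrative sketch only */
static int example_setup_call(struct ceph_osd_request *req,
			      struct page **reply_pages, u64 reply_len)
{
	int ret;

	ret = osd_req_op_cls_init(req, 0, CEPH_OSD_OP_CALL, "lock", "get_info");
	if (ret)
		return ret;

	osd_req_op_cls_response_data_pages(req, 0, reply_pages, reply_len,
					   0, false, false);
	return 0;
}
#endif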
806
807 int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int which,
808                           u16 opcode, const char *name, const void *value,
809                           size_t size, u8 cmp_op, u8 cmp_mode)
810 {
811         struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
812                                                       opcode, 0);
813         struct ceph_pagelist *pagelist;
814         size_t payload_len;
815
816         BUG_ON(opcode != CEPH_OSD_OP_SETXATTR && opcode != CEPH_OSD_OP_CMPXATTR);
817
818         pagelist = kmalloc(sizeof(*pagelist), GFP_NOFS);
819         if (!pagelist)
820                 return -ENOMEM;
821
822         ceph_pagelist_init(pagelist);
823
824         payload_len = strlen(name);
825         op->xattr.name_len = payload_len;
826         ceph_pagelist_append(pagelist, name, payload_len);
827
828         op->xattr.value_len = size;
829         ceph_pagelist_append(pagelist, value, size);
830         payload_len += size;
831
832         op->xattr.cmp_op = cmp_op;
833         op->xattr.cmp_mode = cmp_mode;
834
835         ceph_osd_data_pagelist_init(&op->xattr.osd_data, pagelist);
836         op->indata_len = payload_len;
837         return 0;
838 }
839 EXPORT_SYMBOL(osd_req_op_xattr_init);
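
/*
 * For example, a caller could make an update conditional on an existing
 * object xattr by using CEPH_OSD_OP_CMPXATTR with cmp_op
 * CEPH_OSD_CMPXATTR_OP_EQ and cmp_mode CEPH_OSD_CMPXATTR_MODE_U64
 * (hypothetical usage); for a plain CEPH_OSD_OP_SETXATTR, cmp_op and
 * cmp_mode are simply 0.
 */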
840
841 /*
842  * @watch_opcode: CEPH_OSD_WATCH_OP_*
843  */
844 static void osd_req_op_watch_init(struct ceph_osd_request *req, int which,
845                                   u64 cookie, u8 watch_opcode)
846 {
847         struct ceph_osd_req_op *op;
848
849         op = _osd_req_op_init(req, which, CEPH_OSD_OP_WATCH, 0);
850         op->watch.cookie = cookie;
851         op->watch.op = watch_opcode;
852         op->watch.gen = 0;
853 }
854
855 void osd_req_op_alloc_hint_init(struct ceph_osd_request *osd_req,
856                                 unsigned int which,
857                                 u64 expected_object_size,
858                                 u64 expected_write_size)
859 {
860         struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
861                                                       CEPH_OSD_OP_SETALLOCHINT,
862                                                       0);
863
864         op->alloc_hint.expected_object_size = expected_object_size;
865         op->alloc_hint.expected_write_size = expected_write_size;
866
867         /*
868          * CEPH_OSD_OP_SETALLOCHINT op is advisory and therefore deemed
869          * not worth a feature bit.  Set FAILOK per-op flag to make
870          * sure older osds don't trip over an unsupported opcode.
871          */
872         op->flags |= CEPH_OSD_OP_FLAG_FAILOK;
873 }
874 EXPORT_SYMBOL(osd_req_op_alloc_hint_init);
875
876 static void ceph_osdc_msg_data_add(struct ceph_msg *msg,
877                                 struct ceph_osd_data *osd_data)
878 {
879         u64 length = ceph_osd_data_length(osd_data);
880
881         if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) {
882                 BUG_ON(length > (u64) SIZE_MAX);
883                 if (length)
884                         ceph_msg_data_add_pages(msg, osd_data->pages,
885                                         length, osd_data->alignment);
886         } else if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGELIST) {
887                 BUG_ON(!length);
888                 ceph_msg_data_add_pagelist(msg, osd_data->pagelist);
889 #ifdef CONFIG_BLOCK
890         } else if (osd_data->type == CEPH_OSD_DATA_TYPE_BIO) {
891                 ceph_msg_data_add_bio(msg, &osd_data->bio_pos, length);
892 #endif
893         } else if (osd_data->type == CEPH_OSD_DATA_TYPE_BVECS) {
894                 ceph_msg_data_add_bvecs(msg, &osd_data->bvec_pos);
895         } else {
896                 BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_NONE);
897         }
898 }
899
900 static u32 osd_req_encode_op(struct ceph_osd_op *dst,
901                              const struct ceph_osd_req_op *src)
902 {
903         if (WARN_ON(!osd_req_opcode_valid(src->op))) {
904                 pr_err("unrecognized osd opcode %d\n", src->op);
905
906                 return 0;
907         }
908
909         switch (src->op) {
910         case CEPH_OSD_OP_STAT:
911                 break;
912         case CEPH_OSD_OP_READ:
913         case CEPH_OSD_OP_WRITE:
914         case CEPH_OSD_OP_WRITEFULL:
915         case CEPH_OSD_OP_ZERO:
916         case CEPH_OSD_OP_TRUNCATE:
917                 dst->extent.offset = cpu_to_le64(src->extent.offset);
918                 dst->extent.length = cpu_to_le64(src->extent.length);
919                 dst->extent.truncate_size =
920                         cpu_to_le64(src->extent.truncate_size);
921                 dst->extent.truncate_seq =
922                         cpu_to_le32(src->extent.truncate_seq);
923                 break;
924         case CEPH_OSD_OP_CALL:
925                 dst->cls.class_len = src->cls.class_len;
926                 dst->cls.method_len = src->cls.method_len;
927                 dst->cls.indata_len = cpu_to_le32(src->cls.indata_len);
928                 break;
929         case CEPH_OSD_OP_WATCH:
930                 dst->watch.cookie = cpu_to_le64(src->watch.cookie);
931                 dst->watch.ver = cpu_to_le64(0);
932                 dst->watch.op = src->watch.op;
933                 dst->watch.gen = cpu_to_le32(src->watch.gen);
934                 break;
935         case CEPH_OSD_OP_NOTIFY_ACK:
936                 break;
937         case CEPH_OSD_OP_NOTIFY:
938                 dst->notify.cookie = cpu_to_le64(src->notify.cookie);
939                 break;
940         case CEPH_OSD_OP_LIST_WATCHERS:
941                 break;
942         case CEPH_OSD_OP_SETALLOCHINT:
943                 dst->alloc_hint.expected_object_size =
944                     cpu_to_le64(src->alloc_hint.expected_object_size);
945                 dst->alloc_hint.expected_write_size =
946                     cpu_to_le64(src->alloc_hint.expected_write_size);
947                 break;
948         case CEPH_OSD_OP_SETXATTR:
949         case CEPH_OSD_OP_CMPXATTR:
950                 dst->xattr.name_len = cpu_to_le32(src->xattr.name_len);
951                 dst->xattr.value_len = cpu_to_le32(src->xattr.value_len);
952                 dst->xattr.cmp_op = src->xattr.cmp_op;
953                 dst->xattr.cmp_mode = src->xattr.cmp_mode;
954                 break;
955         case CEPH_OSD_OP_CREATE:
956         case CEPH_OSD_OP_DELETE:
957                 break;
958         default:
959                 pr_err("unsupported osd opcode %s\n",
960                         ceph_osd_op_name(src->op));
961                 WARN_ON(1);
962
963                 return 0;
964         }
965
966         dst->op = cpu_to_le16(src->op);
967         dst->flags = cpu_to_le32(src->flags);
968         dst->payload_len = cpu_to_le32(src->indata_len);
969
970         return src->indata_len;
971 }
972
973 /*
974  * build new request AND message, calculate layout, and adjust file
975  * extent as needed.
976  *
977  * if the file was recently truncated, we include information about its
978  * old and new size so that the object can be updated appropriately.  (we
979  * avoid synchronously deleting truncated objects because it's slow.)
980  */
981 struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
982                                                struct ceph_file_layout *layout,
983                                                struct ceph_vino vino,
984                                                u64 off, u64 *plen,
985                                                unsigned int which, int num_ops,
986                                                int opcode, int flags,
987                                                struct ceph_snap_context *snapc,
988                                                u32 truncate_seq,
989                                                u64 truncate_size,
990                                                bool use_mempool)
991 {
992         struct ceph_osd_request *req;
993         u64 objnum = 0;
994         u64 objoff = 0;
995         u64 objlen = 0;
996         int r;
997
998         BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
999                opcode != CEPH_OSD_OP_ZERO && opcode != CEPH_OSD_OP_TRUNCATE &&
1000                opcode != CEPH_OSD_OP_CREATE && opcode != CEPH_OSD_OP_DELETE);
1001
1002         req = ceph_osdc_alloc_request(osdc, snapc, num_ops, use_mempool,
1003                                         GFP_NOFS);
1004         if (!req) {
1005                 r = -ENOMEM;
1006                 goto fail;
1007         }
1008
1009         /* calculate max write size */
1010         r = calc_layout(layout, off, plen, &objnum, &objoff, &objlen);
1011         if (r)
1012                 goto fail;
1013
1014         if (opcode == CEPH_OSD_OP_CREATE || opcode == CEPH_OSD_OP_DELETE) {
1015                 osd_req_op_init(req, which, opcode, 0);
1016         } else {
1017                 u32 object_size = layout->object_size;
1018                 u32 object_base = off - objoff;
1019                 if (!(truncate_seq == 1 && truncate_size == -1ULL)) {
1020                         if (truncate_size <= object_base) {
1021                                 truncate_size = 0;
1022                         } else {
1023                                 truncate_size -= object_base;
1024                                 if (truncate_size > object_size)
1025                                         truncate_size = object_size;
1026                         }
1027                 }
1028                 osd_req_op_extent_init(req, which, opcode, objoff, objlen,
1029                                        truncate_size, truncate_seq);
1030         }
1031
1032         req->r_flags = flags;
1033         req->r_base_oloc.pool = layout->pool_id;
1034         req->r_base_oloc.pool_ns = ceph_try_get_string(layout->pool_ns);
1035         ceph_oid_printf(&req->r_base_oid, "%llx.%08llx", vino.ino, objnum);
1036
1037         req->r_snapid = vino.snap;
1038         if (flags & CEPH_OSD_FLAG_WRITE)
1039                 req->r_data_offset = off;
1040
1041         r = ceph_osdc_alloc_messages(req, GFP_NOFS);
1042         if (r)
1043                 goto fail;
1044
1045         return req;
1046
1047 fail:
1048         ceph_osdc_put_request(req);
1049         return ERR_PTR(r);
1050 }
1051 EXPORT_SYMBOL(ceph_osdc_new_request);
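
/*
 * Usage sketch for a file-extent read (assumes @vino and @layout come from
 * a filesystem inode; roughly the shape of the fs/ceph read path).  Note
 * that @len may be trimmed to the object boundary by calc_layout() before
 * the data buffer is attached.
 */
#if 0	/* illustrative sketch only */
static int example_read_extent(struct ceph_osd_client *osdc,
			       struct ceph_vino vino,
			       struct ceph_file_layout *layout,
			       u64 off, u64 len, struct page **pages)
{
	struct ceph_osd_request *req;
	int ret;

	req = ceph_osdc_new_request(osdc, layout, vino, off, &len, 0, 1,
				    CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
				    NULL, 0, 0, false);
	if (IS_ERR(req))
		return PTR_ERR(req);

	/* len may have been shortened to fit within one object */
	osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, false, false);

	ret = ceph_osdc_start_request(osdc, req, false);
	if (!ret)
		ret = ceph_osdc_wait_request(osdc, req);
	ceph_osdc_put_request(req);
	return ret;
}
#endif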
1052
1053 /*
1054  * We keep osd requests in an rbtree, sorted by ->r_tid.
1055  */
1056 DEFINE_RB_FUNCS(request, struct ceph_osd_request, r_tid, r_node)
1057 DEFINE_RB_FUNCS(request_mc, struct ceph_osd_request, r_tid, r_mc_node)
1058
1059 /*
1060  * Call @fn on each OSD request as long as @fn returns 0.
1061  */
1062 static void for_each_request(struct ceph_osd_client *osdc,
1063                         int (*fn)(struct ceph_osd_request *req, void *arg),
1064                         void *arg)
1065 {
1066         struct rb_node *n, *p;
1067
1068         for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
1069                 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
1070
1071                 for (p = rb_first(&osd->o_requests); p; ) {
1072                         struct ceph_osd_request *req =
1073                             rb_entry(p, struct ceph_osd_request, r_node);
1074
1075                         p = rb_next(p);
1076                         if (fn(req, arg))
1077                                 return;
1078                 }
1079         }
1080
1081         for (p = rb_first(&osdc->homeless_osd.o_requests); p; ) {
1082                 struct ceph_osd_request *req =
1083                     rb_entry(p, struct ceph_osd_request, r_node);
1084
1085                 p = rb_next(p);
1086                 if (fn(req, arg))
1087                         return;
1088         }
1089 }
1090
1091 static bool osd_homeless(struct ceph_osd *osd)
1092 {
1093         return osd->o_osd == CEPH_HOMELESS_OSD;
1094 }
1095
1096 static bool osd_registered(struct ceph_osd *osd)
1097 {
1098         verify_osdc_locked(osd->o_osdc);
1099
1100         return !RB_EMPTY_NODE(&osd->o_node);
1101 }
1102
1103 /*
1104  * Assumes @osd is zero-initialized.
1105  */
1106 static void osd_init(struct ceph_osd *osd)
1107 {
1108         refcount_set(&osd->o_ref, 1);
1109         RB_CLEAR_NODE(&osd->o_node);
1110         osd->o_requests = RB_ROOT;
1111         osd->o_linger_requests = RB_ROOT;
1112         osd->o_backoff_mappings = RB_ROOT;
1113         osd->o_backoffs_by_id = RB_ROOT;
1114         INIT_LIST_HEAD(&osd->o_osd_lru);
1115         INIT_LIST_HEAD(&osd->o_keepalive_item);
1116         osd->o_incarnation = 1;
1117         mutex_init(&osd->lock);
1118 }
1119
1120 static void osd_cleanup(struct ceph_osd *osd)
1121 {
1122         WARN_ON(!RB_EMPTY_NODE(&osd->o_node));
1123         WARN_ON(!RB_EMPTY_ROOT(&osd->o_requests));
1124         WARN_ON(!RB_EMPTY_ROOT(&osd->o_linger_requests));
1125         WARN_ON(!RB_EMPTY_ROOT(&osd->o_backoff_mappings));
1126         WARN_ON(!RB_EMPTY_ROOT(&osd->o_backoffs_by_id));
1127         WARN_ON(!list_empty(&osd->o_osd_lru));
1128         WARN_ON(!list_empty(&osd->o_keepalive_item));
1129
1130         if (osd->o_auth.authorizer) {
1131                 WARN_ON(osd_homeless(osd));
1132                 ceph_auth_destroy_authorizer(osd->o_auth.authorizer);
1133         }
1134 }
1135
1136 /*
1137  * Track open sessions with osds.
1138  */
1139 static struct ceph_osd *create_osd(struct ceph_osd_client *osdc, int onum)
1140 {
1141         struct ceph_osd *osd;
1142
1143         WARN_ON(onum == CEPH_HOMELESS_OSD);
1144
1145         osd = kzalloc(sizeof(*osd), GFP_NOIO | __GFP_NOFAIL);
1146         osd_init(osd);
1147         osd->o_osdc = osdc;
1148         osd->o_osd = onum;
1149
1150         ceph_con_init(&osd->o_con, osd, &osd_con_ops, &osdc->client->msgr);
1151
1152         return osd;
1153 }
1154
1155 static struct ceph_osd *get_osd(struct ceph_osd *osd)
1156 {
1157         if (refcount_inc_not_zero(&osd->o_ref)) {
1158                 dout("get_osd %p %d -> %d\n", osd, refcount_read(&osd->o_ref)-1,
1159                      refcount_read(&osd->o_ref));
1160                 return osd;
1161         } else {
1162                 dout("get_osd %p FAIL\n", osd);
1163                 return NULL;
1164         }
1165 }
1166
1167 static void put_osd(struct ceph_osd *osd)
1168 {
1169         dout("put_osd %p %d -> %d\n", osd, refcount_read(&osd->o_ref),
1170              refcount_read(&osd->o_ref) - 1);
1171         if (refcount_dec_and_test(&osd->o_ref)) {
1172                 osd_cleanup(osd);
1173                 kfree(osd);
1174         }
1175 }
1176
1177 DEFINE_RB_FUNCS(osd, struct ceph_osd, o_osd, o_node)
1178
1179 static void __move_osd_to_lru(struct ceph_osd *osd)
1180 {
1181         struct ceph_osd_client *osdc = osd->o_osdc;
1182
1183         dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
1184         BUG_ON(!list_empty(&osd->o_osd_lru));
1185
1186         spin_lock(&osdc->osd_lru_lock);
1187         list_add_tail(&osd->o_osd_lru, &osdc->osd_lru);
1188         spin_unlock(&osdc->osd_lru_lock);
1189
1190         osd->lru_ttl = jiffies + osdc->client->options->osd_idle_ttl;
1191 }
1192
1193 static void maybe_move_osd_to_lru(struct ceph_osd *osd)
1194 {
1195         if (RB_EMPTY_ROOT(&osd->o_requests) &&
1196             RB_EMPTY_ROOT(&osd->o_linger_requests))
1197                 __move_osd_to_lru(osd);
1198 }
1199
1200 static void __remove_osd_from_lru(struct ceph_osd *osd)
1201 {
1202         struct ceph_osd_client *osdc = osd->o_osdc;
1203
1204         dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
1205
1206         spin_lock(&osdc->osd_lru_lock);
1207         if (!list_empty(&osd->o_osd_lru))
1208                 list_del_init(&osd->o_osd_lru);
1209         spin_unlock(&osdc->osd_lru_lock);
1210 }
1211
1212 /*
1213  * Close the connection and assign any leftover requests to the
1214  * homeless session.
1215  */
1216 static void close_osd(struct ceph_osd *osd)
1217 {
1218         struct ceph_osd_client *osdc = osd->o_osdc;
1219         struct rb_node *n;
1220
1221         verify_osdc_wrlocked(osdc);
1222         dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
1223
1224         ceph_con_close(&osd->o_con);
1225
1226         for (n = rb_first(&osd->o_requests); n; ) {
1227                 struct ceph_osd_request *req =
1228                     rb_entry(n, struct ceph_osd_request, r_node);
1229
1230                 n = rb_next(n); /* unlink_request() */
1231
1232                 dout(" reassigning req %p tid %llu\n", req, req->r_tid);
1233                 unlink_request(osd, req);
1234                 link_request(&osdc->homeless_osd, req);
1235         }
1236         for (n = rb_first(&osd->o_linger_requests); n; ) {
1237                 struct ceph_osd_linger_request *lreq =
1238                     rb_entry(n, struct ceph_osd_linger_request, node);
1239
1240                 n = rb_next(n); /* unlink_linger() */
1241
1242                 dout(" reassigning lreq %p linger_id %llu\n", lreq,
1243                      lreq->linger_id);
1244                 unlink_linger(osd, lreq);
1245                 link_linger(&osdc->homeless_osd, lreq);
1246         }
1247         clear_backoffs(osd);
1248
1249         __remove_osd_from_lru(osd);
1250         erase_osd(&osdc->osds, osd);
1251         put_osd(osd);
1252 }
1253
1254 /*
1255  * reset osd connect
1256  */
1257 static int reopen_osd(struct ceph_osd *osd)
1258 {
1259         struct ceph_entity_addr *peer_addr;
1260
1261         dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
1262
1263         if (RB_EMPTY_ROOT(&osd->o_requests) &&
1264             RB_EMPTY_ROOT(&osd->o_linger_requests)) {
1265                 close_osd(osd);
1266                 return -ENODEV;
1267         }
1268
1269         peer_addr = &osd->o_osdc->osdmap->osd_addr[osd->o_osd];
1270         if (!memcmp(peer_addr, &osd->o_con.peer_addr, sizeof (*peer_addr)) &&
1271                         !ceph_con_opened(&osd->o_con)) {
1272                 struct rb_node *n;
1273
1274                 dout("osd addr hasn't changed and connection never opened, "
1275                      "letting msgr retry\n");
1276         /* touch each r_stamp for handle_timeout()'s benefit */
1277                 for (n = rb_first(&osd->o_requests); n; n = rb_next(n)) {
1278                         struct ceph_osd_request *req =
1279                             rb_entry(n, struct ceph_osd_request, r_node);
1280                         req->r_stamp = jiffies;
1281                 }
1282
1283                 return -EAGAIN;
1284         }
1285
1286         ceph_con_close(&osd->o_con);
1287         ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd, peer_addr);
1288         osd->o_incarnation++;
1289
1290         return 0;
1291 }
1292
1293 static struct ceph_osd *lookup_create_osd(struct ceph_osd_client *osdc, int o,
1294                                           bool wrlocked)
1295 {
1296         struct ceph_osd *osd;
1297
1298         if (wrlocked)
1299                 verify_osdc_wrlocked(osdc);
1300         else
1301                 verify_osdc_locked(osdc);
1302
1303         if (o != CEPH_HOMELESS_OSD)
1304                 osd = lookup_osd(&osdc->osds, o);
1305         else
1306                 osd = &osdc->homeless_osd;
1307         if (!osd) {
1308                 if (!wrlocked)
1309                         return ERR_PTR(-EAGAIN);
1310
1311                 osd = create_osd(osdc, o);
1312                 insert_osd(&osdc->osds, osd);
1313                 ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd,
1314                               &osdc->osdmap->osd_addr[osd->o_osd]);
1315         }
1316
1317         dout("%s osdc %p osd%d -> osd %p\n", __func__, osdc, o, osd);
1318         return osd;
1319 }
1320
1321 /*
1322  * Create request <-> OSD session relation.
1323  *
1324  * @req has to be assigned a tid, @osd may be homeless.
1325  */
1326 static void link_request(struct ceph_osd *osd, struct ceph_osd_request *req)
1327 {
1328         verify_osd_locked(osd);
1329         WARN_ON(!req->r_tid || req->r_osd);
1330         dout("%s osd %p osd%d req %p tid %llu\n", __func__, osd, osd->o_osd,
1331              req, req->r_tid);
1332
1333         if (!osd_homeless(osd))
1334                 __remove_osd_from_lru(osd);
1335         else
1336                 atomic_inc(&osd->o_osdc->num_homeless);
1337
1338         get_osd(osd);
1339         insert_request(&osd->o_requests, req);
1340         req->r_osd = osd;
1341 }
1342
1343 static void unlink_request(struct ceph_osd *osd, struct ceph_osd_request *req)
1344 {
1345         verify_osd_locked(osd);
1346         WARN_ON(req->r_osd != osd);
1347         dout("%s osd %p osd%d req %p tid %llu\n", __func__, osd, osd->o_osd,
1348              req, req->r_tid);
1349
1350         req->r_osd = NULL;
1351         erase_request(&osd->o_requests, req);
1352         put_osd(osd);
1353
1354         if (!osd_homeless(osd))
1355                 maybe_move_osd_to_lru(osd);
1356         else
1357                 atomic_dec(&osd->o_osdc->num_homeless);
1358 }
1359
1360 static bool __pool_full(struct ceph_pg_pool_info *pi)
1361 {
1362         return pi->flags & CEPH_POOL_FLAG_FULL;
1363 }
1364
1365 static bool have_pool_full(struct ceph_osd_client *osdc)
1366 {
1367         struct rb_node *n;
1368
1369         for (n = rb_first(&osdc->osdmap->pg_pools); n; n = rb_next(n)) {
1370                 struct ceph_pg_pool_info *pi =
1371                     rb_entry(n, struct ceph_pg_pool_info, node);
1372
1373                 if (__pool_full(pi))
1374                         return true;
1375         }
1376
1377         return false;
1378 }
1379
1380 static bool pool_full(struct ceph_osd_client *osdc, s64 pool_id)
1381 {
1382         struct ceph_pg_pool_info *pi;
1383
1384         pi = ceph_pg_pool_by_id(osdc->osdmap, pool_id);
1385         if (!pi)
1386                 return false;
1387
1388         return __pool_full(pi);
1389 }
1390
1391 /*
1392  * Returns whether a request should be blocked from being sent
1393  * based on the current osdmap and osd_client settings.
1394  */
1395 static bool target_should_be_paused(struct ceph_osd_client *osdc,
1396                                     const struct ceph_osd_request_target *t,
1397                                     struct ceph_pg_pool_info *pi)
1398 {
1399         bool pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
1400         bool pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
1401                        ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
1402                        __pool_full(pi);
1403
1404         WARN_ON(pi->id != t->target_oloc.pool);
1405         return ((t->flags & CEPH_OSD_FLAG_READ) && pauserd) ||
1406                ((t->flags & CEPH_OSD_FLAG_WRITE) && pausewr) ||
1407                (osdc->osdmap->epoch < osdc->epoch_barrier);
1408 }
1409
1410 enum calc_target_result {
1411         CALC_TARGET_NO_ACTION = 0,
1412         CALC_TARGET_NEED_RESEND,
1413         CALC_TARGET_POOL_DNE,
1414 };
1415
1416 static enum calc_target_result calc_target(struct ceph_osd_client *osdc,
1417                                            struct ceph_osd_request_target *t,
1418                                            struct ceph_connection *con,
1419                                            bool any_change)
1420 {
1421         struct ceph_pg_pool_info *pi;
1422         struct ceph_pg pgid, last_pgid;
1423         struct ceph_osds up, acting;
1424         bool force_resend = false;
1425         bool unpaused = false;
1426         bool legacy_change;
1427         bool split = false;
1428         bool sort_bitwise = ceph_osdmap_flag(osdc, CEPH_OSDMAP_SORTBITWISE);
1429         bool recovery_deletes = ceph_osdmap_flag(osdc,
1430                                                  CEPH_OSDMAP_RECOVERY_DELETES);
1431         enum calc_target_result ct_res;
1432
1433         t->epoch = osdc->osdmap->epoch;
1434         pi = ceph_pg_pool_by_id(osdc->osdmap, t->base_oloc.pool);
1435         if (!pi) {
1436                 t->osd = CEPH_HOMELESS_OSD;
1437                 ct_res = CALC_TARGET_POOL_DNE;
1438                 goto out;
1439         }
1440
1441         if (osdc->osdmap->epoch == pi->last_force_request_resend) {
1442                 if (t->last_force_resend < pi->last_force_request_resend) {
1443                         t->last_force_resend = pi->last_force_request_resend;
1444                         force_resend = true;
1445                 } else if (t->last_force_resend == 0) {
1446                         force_resend = true;
1447                 }
1448         }
1449
1450         /* apply tiering */
1451         ceph_oid_copy(&t->target_oid, &t->base_oid);
1452         ceph_oloc_copy(&t->target_oloc, &t->base_oloc);
1453         if ((t->flags & CEPH_OSD_FLAG_IGNORE_OVERLAY) == 0) {
1454                 if (t->flags & CEPH_OSD_FLAG_READ && pi->read_tier >= 0)
1455                         t->target_oloc.pool = pi->read_tier;
1456                 if (t->flags & CEPH_OSD_FLAG_WRITE && pi->write_tier >= 0)
1457                         t->target_oloc.pool = pi->write_tier;
1458
1459                 pi = ceph_pg_pool_by_id(osdc->osdmap, t->target_oloc.pool);
1460                 if (!pi) {
1461                         t->osd = CEPH_HOMELESS_OSD;
1462                         ct_res = CALC_TARGET_POOL_DNE;
1463                         goto out;
1464                 }
1465         }
1466
1467         __ceph_object_locator_to_pg(pi, &t->target_oid, &t->target_oloc, &pgid);
1468         last_pgid.pool = pgid.pool;
1469         last_pgid.seed = ceph_stable_mod(pgid.seed, t->pg_num, t->pg_num_mask);
1470
1471         ceph_pg_to_up_acting_osds(osdc->osdmap, pi, &pgid, &up, &acting);
1472         if (any_change &&
1473             ceph_is_new_interval(&t->acting,
1474                                  &acting,
1475                                  &t->up,
1476                                  &up,
1477                                  t->size,
1478                                  pi->size,
1479                                  t->min_size,
1480                                  pi->min_size,
1481                                  t->pg_num,
1482                                  pi->pg_num,
1483                                  t->sort_bitwise,
1484                                  sort_bitwise,
1485                                  t->recovery_deletes,
1486                                  recovery_deletes,
1487                                  &last_pgid))
1488                 force_resend = true;
1489
1490         if (t->paused && !target_should_be_paused(osdc, t, pi)) {
1491                 t->paused = false;
1492                 unpaused = true;
1493         }
1494         legacy_change = ceph_pg_compare(&t->pgid, &pgid) ||
1495                         ceph_osds_changed(&t->acting, &acting, any_change);
1496         if (t->pg_num)
1497                 split = ceph_pg_is_split(&last_pgid, t->pg_num, pi->pg_num);
1498
1499         if (legacy_change || force_resend || split) {
1500                 t->pgid = pgid; /* struct */
1501                 ceph_pg_to_primary_shard(osdc->osdmap, pi, &pgid, &t->spgid);
1502                 ceph_osds_copy(&t->acting, &acting);
1503                 ceph_osds_copy(&t->up, &up);
1504                 t->size = pi->size;
1505                 t->min_size = pi->min_size;
1506                 t->pg_num = pi->pg_num;
1507                 t->pg_num_mask = pi->pg_num_mask;
1508                 t->sort_bitwise = sort_bitwise;
1509                 t->recovery_deletes = recovery_deletes;
1510
1511                 t->osd = acting.primary;
1512         }
1513
1514         if (unpaused || legacy_change || force_resend ||
1515             (split && con && CEPH_HAVE_FEATURE(con->peer_features,
1516                                                RESEND_ON_SPLIT)))
1517                 ct_res = CALC_TARGET_NEED_RESEND;
1518         else
1519                 ct_res = CALC_TARGET_NO_ACTION;
1520
1521 out:
1522         dout("%s t %p -> ct_res %d osd %d\n", __func__, t, ct_res, t->osd);
1523         return ct_res;
1524 }
1525
1526 static struct ceph_spg_mapping *alloc_spg_mapping(void)
1527 {
1528         struct ceph_spg_mapping *spg;
1529
1530         spg = kmalloc(sizeof(*spg), GFP_NOIO);
1531         if (!spg)
1532                 return NULL;
1533
1534         RB_CLEAR_NODE(&spg->node);
1535         spg->backoffs = RB_ROOT;
1536         return spg;
1537 }
1538
1539 static void free_spg_mapping(struct ceph_spg_mapping *spg)
1540 {
1541         WARN_ON(!RB_EMPTY_NODE(&spg->node));
1542         WARN_ON(!RB_EMPTY_ROOT(&spg->backoffs));
1543
1544         kfree(spg);
1545 }
1546
1547 /*
1548  * rbtree of ceph_spg_mapping for handling map<spg_t, ...>, similar to
1549  * ceph_pg_mapping.  Used to track OSD backoffs -- a backoff [range] is
1550  * defined only within a specific spgid; it is not inherited by child
1551  * PGs on split, nor carried over to a new primary.
1552  */
1553 DEFINE_RB_FUNCS2(spg_mapping, struct ceph_spg_mapping, spgid, ceph_spg_compare,
1554                  RB_BYPTR, const struct ceph_spg *, node)
1555
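/*
 * Bitwise ordering key for an hobject_t: MAX sorts after every valid
 * 32-bit reversed hash.
 */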
1556 static u64 hoid_get_bitwise_key(const struct ceph_hobject_id *hoid)
1557 {
1558         return hoid->is_max ? 0x100000000ull : hoid->hash_reverse_bits;
1559 }
1560
1561 static void hoid_get_effective_key(const struct ceph_hobject_id *hoid,
1562                                    void **pkey, size_t *pkey_len)
1563 {
1564         if (hoid->key_len) {
1565                 *pkey = hoid->key;
1566                 *pkey_len = hoid->key_len;
1567         } else {
1568                 *pkey = hoid->oid;
1569                 *pkey_len = hoid->oid_len;
1570         }
1571 }
1572
1573 static int compare_names(const void *name1, size_t name1_len,
1574                          const void *name2, size_t name2_len)
1575 {
1576         int ret;
1577
1578         ret = memcmp(name1, name2, min(name1_len, name2_len));
1579         if (!ret) {
1580                 if (name1_len < name2_len)
1581                         ret = -1;
1582                 else if (name1_len > name2_len)
1583                         ret = 1;
1584         }
1585         return ret;
1586 }
1587
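/*
 * Compare hobject_t's in bitwise order: is_max, pool, reversed hash,
 * namespace, effective key (locator key if set, object name otherwise),
 * object name, snapid.
 */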
1588 static int hoid_compare(const struct ceph_hobject_id *lhs,
1589                         const struct ceph_hobject_id *rhs)
1590 {
1591         void *effective_key1, *effective_key2;
1592         size_t effective_key1_len, effective_key2_len;
1593         int ret;
1594
1595         if (lhs->is_max < rhs->is_max)
1596                 return -1;
1597         if (lhs->is_max > rhs->is_max)
1598                 return 1;
1599
1600         if (lhs->pool < rhs->pool)
1601                 return -1;
1602         if (lhs->pool > rhs->pool)
1603                 return 1;
1604
1605         if (hoid_get_bitwise_key(lhs) < hoid_get_bitwise_key(rhs))
1606                 return -1;
1607         if (hoid_get_bitwise_key(lhs) > hoid_get_bitwise_key(rhs))
1608                 return 1;
1609
1610         ret = compare_names(lhs->nspace, lhs->nspace_len,
1611                             rhs->nspace, rhs->nspace_len);
1612         if (ret)
1613                 return ret;
1614
1615         hoid_get_effective_key(lhs, &effective_key1, &effective_key1_len);
1616         hoid_get_effective_key(rhs, &effective_key2, &effective_key2_len);
1617         ret = compare_names(effective_key1, effective_key1_len,
1618                             effective_key2, effective_key2_len);
1619         if (ret)
1620                 return ret;
1621
1622         ret = compare_names(lhs->oid, lhs->oid_len, rhs->oid, rhs->oid_len);
1623         if (ret)
1624                 return ret;
1625
1626         if (lhs->snapid < rhs->snapid)
1627                 return -1;
1628         if (lhs->snapid > rhs->snapid)
1629                 return 1;
1630
1631         return 0;
1632 }
1633
1634 /*
1635  * For decoding ->begin and ->end of MOSDBackoff only -- no MIN/MAX
1636  * compat stuff here.
1637  *
1638  * Assumes @hoid is zero-initialized.
1639  */
1640 static int decode_hoid(void **p, void *end, struct ceph_hobject_id *hoid)
1641 {
1642         u8 struct_v;
1643         u32 struct_len;
1644         int ret;
1645
1646         ret = ceph_start_decoding(p, end, 4, "hobject_t", &struct_v,
1647                                   &struct_len);
1648         if (ret)
1649                 return ret;
1650
1651         if (struct_v < 4) {
1652                 pr_err("got struct_v %d < 4 of hobject_t\n", struct_v);
1653                 goto e_inval;
1654         }
1655
1656         hoid->key = ceph_extract_encoded_string(p, end, &hoid->key_len,
1657                                                 GFP_NOIO);
1658         if (IS_ERR(hoid->key)) {
1659                 ret = PTR_ERR(hoid->key);
1660                 hoid->key = NULL;
1661                 return ret;
1662         }
1663
1664         hoid->oid = ceph_extract_encoded_string(p, end, &hoid->oid_len,
1665                                                 GFP_NOIO);
1666         if (IS_ERR(hoid->oid)) {
1667                 ret = PTR_ERR(hoid->oid);
1668                 hoid->oid = NULL;
1669                 return ret;
1670         }
1671
1672         ceph_decode_64_safe(p, end, hoid->snapid, e_inval);
1673         ceph_decode_32_safe(p, end, hoid->hash, e_inval);
1674         ceph_decode_8_safe(p, end, hoid->is_max, e_inval);
1675
1676         hoid->nspace = ceph_extract_encoded_string(p, end, &hoid->nspace_len,
1677                                                    GFP_NOIO);
1678         if (IS_ERR(hoid->nspace)) {
1679                 ret = PTR_ERR(hoid->nspace);
1680                 hoid->nspace = NULL;
1681                 return ret;
1682         }
1683
1684         ceph_decode_64_safe(p, end, hoid->pool, e_inval);
1685
1686         ceph_hoid_build_hash_cache(hoid);
1687         return 0;
1688
1689 e_inval:
1690         return -EINVAL;
1691 }
1692
1693 static int hoid_encoding_size(const struct ceph_hobject_id *hoid)
1694 {
1695         return 8 + 4 + 1 + 8 + /* snapid, hash, is_max, pool */
1696                4 + hoid->key_len + 4 + hoid->oid_len + 4 + hoid->nspace_len;
1697 }
1698
1699 static void encode_hoid(void **p, void *end, const struct ceph_hobject_id *hoid)
1700 {
1701         ceph_start_encoding(p, 4, 3, hoid_encoding_size(hoid));
1702         ceph_encode_string(p, end, hoid->key, hoid->key_len);
1703         ceph_encode_string(p, end, hoid->oid, hoid->oid_len);
1704         ceph_encode_64(p, hoid->snapid);
1705         ceph_encode_32(p, hoid->hash);
1706         ceph_encode_8(p, hoid->is_max);
1707         ceph_encode_string(p, end, hoid->nspace, hoid->nspace_len);
1708         ceph_encode_64(p, hoid->pool);
1709 }
1710
1711 static void free_hoid(struct ceph_hobject_id *hoid)
1712 {
1713         if (hoid) {
1714                 kfree(hoid->key);
1715                 kfree(hoid->oid);
1716                 kfree(hoid->nspace);
1717                 kfree(hoid);
1718         }
1719 }
1720
1721 static struct ceph_osd_backoff *alloc_backoff(void)
1722 {
1723         struct ceph_osd_backoff *backoff;
1724
1725         backoff = kzalloc(sizeof(*backoff), GFP_NOIO);
1726         if (!backoff)
1727                 return NULL;
1728
1729         RB_CLEAR_NODE(&backoff->spg_node);
1730         RB_CLEAR_NODE(&backoff->id_node);
1731         return backoff;
1732 }
1733
1734 static void free_backoff(struct ceph_osd_backoff *backoff)
1735 {
1736         WARN_ON(!RB_EMPTY_NODE(&backoff->spg_node));
1737         WARN_ON(!RB_EMPTY_NODE(&backoff->id_node));
1738
1739         free_hoid(backoff->begin);
1740         free_hoid(backoff->end);
1741         kfree(backoff);
1742 }
1743
1744 /*
1745  * Within a specific spgid, backoffs are managed by ->begin hoid.
1746  */
1747 DEFINE_RB_INSDEL_FUNCS2(backoff, struct ceph_osd_backoff, begin, hoid_compare,
1748                         RB_BYVAL, spg_node);
1749
1750 static struct ceph_osd_backoff *lookup_containing_backoff(struct rb_root *root,
1751                                             const struct ceph_hobject_id *hoid)
1752 {
1753         struct rb_node *n = root->rb_node;
1754
1755         while (n) {
1756                 struct ceph_osd_backoff *cur =
1757                     rb_entry(n, struct ceph_osd_backoff, spg_node);
1758                 int cmp;
1759
1760                 cmp = hoid_compare(hoid, cur->begin);
1761                 if (cmp < 0) {
1762                         n = n->rb_left;
1763                 } else if (cmp > 0) {
1764                         if (hoid_compare(hoid, cur->end) < 0)
1765                                 return cur;
1766
1767                         n = n->rb_right;
1768                 } else {
1769                         return cur;
1770                 }
1771         }
1772
1773         return NULL;
1774 }
1775
1776 /*
1777  * Each backoff has a unique id within its OSD session.
1778  */
1779 DEFINE_RB_FUNCS(backoff_by_id, struct ceph_osd_backoff, id, id_node)
1780
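/*
 * Tear down all backoff state tracked for an OSD session: every
 * per-spgid mapping and every backoff range within it.
 */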
1781 static void clear_backoffs(struct ceph_osd *osd)
1782 {
1783         while (!RB_EMPTY_ROOT(&osd->o_backoff_mappings)) {
1784                 struct ceph_spg_mapping *spg =
1785                     rb_entry(rb_first(&osd->o_backoff_mappings),
1786                              struct ceph_spg_mapping, node);
1787
1788                 while (!RB_EMPTY_ROOT(&spg->backoffs)) {
1789                         struct ceph_osd_backoff *backoff =
1790                             rb_entry(rb_first(&spg->backoffs),
1791                                      struct ceph_osd_backoff, spg_node);
1792
1793                         erase_backoff(&spg->backoffs, backoff);
1794                         erase_backoff_by_id(&osd->o_backoffs_by_id, backoff);
1795                         free_backoff(backoff);
1796                 }
1797                 erase_spg_mapping(&osd->o_backoff_mappings, spg);
1798                 free_spg_mapping(spg);
1799         }
1800 }
1801
1802 /*
1803  * Set up a temporary, non-owning view into @t.
1804  */
1805 static void hoid_fill_from_target(struct ceph_hobject_id *hoid,
1806                                   const struct ceph_osd_request_target *t)
1807 {
1808         hoid->key = NULL;
1809         hoid->key_len = 0;
1810         hoid->oid = t->target_oid.name;
1811         hoid->oid_len = t->target_oid.name_len;
1812         hoid->snapid = CEPH_NOSNAP;
1813         hoid->hash = t->pgid.seed;
1814         hoid->is_max = false;
1815         if (t->target_oloc.pool_ns) {
1816                 hoid->nspace = t->target_oloc.pool_ns->str;
1817                 hoid->nspace_len = t->target_oloc.pool_ns->len;
1818         } else {
1819                 hoid->nspace = NULL;
1820                 hoid->nspace_len = 0;
1821         }
1822         hoid->pool = t->target_oloc.pool;
1823         ceph_hoid_build_hash_cache(hoid);
1824 }
1825
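/*
 * Return true if @req falls within a backoff installed by its OSD and
 * must be held back instead of (re)sent.
 */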
1826 static bool should_plug_request(struct ceph_osd_request *req)
1827 {
1828         struct ceph_osd *osd = req->r_osd;
1829         struct ceph_spg_mapping *spg;
1830         struct ceph_osd_backoff *backoff;
1831         struct ceph_hobject_id hoid;
1832
1833         spg = lookup_spg_mapping(&osd->o_backoff_mappings, &req->r_t.spgid);
1834         if (!spg)
1835                 return false;
1836
1837         hoid_fill_from_target(&hoid, &req->r_t);
1838         backoff = lookup_containing_backoff(&spg->backoffs, &hoid);
1839         if (!backoff)
1840                 return false;
1841
1842         dout("%s req %p tid %llu backoff osd%d spgid %llu.%xs%d id %llu\n",
1843              __func__, req, req->r_tid, osd->o_osd, backoff->spgid.pgid.pool,
1844              backoff->spgid.pgid.seed, backoff->spgid.shard, backoff->id);
1845         return true;
1846 }
1847
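/*
 * Attach each op's data items to the outgoing request message (for
 * request payloads) or to r_reply (for response buffers) and verify
 * the resulting data length.
 */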
1848 static void setup_request_data(struct ceph_osd_request *req,
1849                                struct ceph_msg *msg)
1850 {
1851         u32 data_len = 0;
1852         int i;
1853
1854         if (!list_empty(&msg->data))
1855                 return;
1856
1857         WARN_ON(msg->data_length);
1858         for (i = 0; i < req->r_num_ops; i++) {
1859                 struct ceph_osd_req_op *op = &req->r_ops[i];
1860
1861                 switch (op->op) {
1862                 /* request */
1863                 case CEPH_OSD_OP_WRITE:
1864                 case CEPH_OSD_OP_WRITEFULL:
1865                         WARN_ON(op->indata_len != op->extent.length);
1866                         ceph_osdc_msg_data_add(msg, &op->extent.osd_data);
1867                         break;
1868                 case CEPH_OSD_OP_SETXATTR:
1869                 case CEPH_OSD_OP_CMPXATTR:
1870                         WARN_ON(op->indata_len != op->xattr.name_len +
1871                                                   op->xattr.value_len);
1872                         ceph_osdc_msg_data_add(msg, &op->xattr.osd_data);
1873                         break;
1874                 case CEPH_OSD_OP_NOTIFY_ACK:
1875                         ceph_osdc_msg_data_add(msg,
1876                                                &op->notify_ack.request_data);
1877                         break;
1878
1879                 /* reply */
1880                 case CEPH_OSD_OP_STAT:
1881                         ceph_osdc_msg_data_add(req->r_reply,
1882                                                &op->raw_data_in);
1883                         break;
1884                 case CEPH_OSD_OP_READ:
1885                         ceph_osdc_msg_data_add(req->r_reply,
1886                                                &op->extent.osd_data);
1887                         break;
1888                 case CEPH_OSD_OP_LIST_WATCHERS:
1889                         ceph_osdc_msg_data_add(req->r_reply,
1890                                                &op->list_watchers.response_data);
1891                         break;
1892
1893                 /* both */
1894                 case CEPH_OSD_OP_CALL:
1895                         WARN_ON(op->indata_len != op->cls.class_len +
1896                                                   op->cls.method_len +
1897                                                   op->cls.indata_len);
1898                         ceph_osdc_msg_data_add(msg, &op->cls.request_info);
1899                         /* optional, can be NONE */
1900                         ceph_osdc_msg_data_add(msg, &op->cls.request_data);
1901                         /* optional, can be NONE */
1902                         ceph_osdc_msg_data_add(req->r_reply,
1903                                                &op->cls.response_data);
1904                         break;
1905                 case CEPH_OSD_OP_NOTIFY:
1906                         ceph_osdc_msg_data_add(msg,
1907                                                &op->notify.request_data);
1908                         ceph_osdc_msg_data_add(req->r_reply,
1909                                                &op->notify.response_data);
1910                         break;
1911                 }
1912
1913                 data_len += op->indata_len;
1914         }
1915
1916         WARN_ON(data_len != msg->data_length);
1917 }
1918
1919 static void encode_pgid(void **p, const struct ceph_pg *pgid)
1920 {
1921         ceph_encode_8(p, 1);
1922         ceph_encode_64(p, pgid->pool);
1923         ceph_encode_32(p, pgid->seed);
1924         ceph_encode_32(p, -1); /* preferred */
1925 }
1926
1927 static void encode_spgid(void **p, const struct ceph_spg *spgid)
1928 {
1929         ceph_start_encoding(p, 1, 1, CEPH_PGID_ENCODING_LEN + 1);
1930         encode_pgid(p, &spgid->pgid);
1931         ceph_encode_8(p, spgid->shard);
1932 }
1933
1934 static void encode_oloc(void **p, void *end,
1935                         const struct ceph_object_locator *oloc)
1936 {
1937         ceph_start_encoding(p, 5, 4, ceph_oloc_encoding_size(oloc));
1938         ceph_encode_64(p, oloc->pool);
1939         ceph_encode_32(p, -1); /* preferred */
1940         ceph_encode_32(p, 0);  /* key len */
1941         if (oloc->pool_ns)
1942                 ceph_encode_string(p, end, oloc->pool_ns->str,
1943                                    oloc->pool_ns->len);
1944         else
1945                 ceph_encode_32(p, 0);
1946 }
1947
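/*
 * Encode the MOSDOp v8 front, leaving room for the trailing peer
 * features.  encode_request_finish() completes it -- or re-encodes it
 * as v4 for pre-luminous OSDs -- once the destination's features are
 * known.
 */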
1948 static void encode_request_partial(struct ceph_osd_request *req,
1949                                    struct ceph_msg *msg)
1950 {
1951         void *p = msg->front.iov_base;
1952         void *const end = p + msg->front_alloc_len;
1953         u32 data_len = 0;
1954         int i;
1955
1956         if (req->r_flags & CEPH_OSD_FLAG_WRITE) {
1957                 /* snapshots aren't writeable */
1958                 WARN_ON(req->r_snapid != CEPH_NOSNAP);
1959         } else {
1960                 WARN_ON(req->r_mtime.tv_sec || req->r_mtime.tv_nsec ||
1961                         req->r_data_offset || req->r_snapc);
1962         }
1963
1964         setup_request_data(req, msg);
1965
1966         encode_spgid(&p, &req->r_t.spgid); /* actual spg */
1967         ceph_encode_32(&p, req->r_t.pgid.seed); /* raw hash */
1968         ceph_encode_32(&p, req->r_osdc->osdmap->epoch);
1969         ceph_encode_32(&p, req->r_flags);
1970
1971         /* reqid */
1972         ceph_start_encoding(&p, 2, 2, sizeof(struct ceph_osd_reqid));
1973         memset(p, 0, sizeof(struct ceph_osd_reqid));
1974         p += sizeof(struct ceph_osd_reqid);
1975
1976         /* trace */
1977         memset(p, 0, sizeof(struct ceph_blkin_trace_info));
1978         p += sizeof(struct ceph_blkin_trace_info);
1979
1980         ceph_encode_32(&p, 0); /* client_inc, always 0 */
1981         ceph_encode_timespec64(p, &req->r_mtime);
1982         p += sizeof(struct ceph_timespec);
1983
1984         encode_oloc(&p, end, &req->r_t.target_oloc);
1985         ceph_encode_string(&p, end, req->r_t.target_oid.name,
1986                            req->r_t.target_oid.name_len);
1987
1988         /* ops, can imply data */
1989         ceph_encode_16(&p, req->r_num_ops);
1990         for (i = 0; i < req->r_num_ops; i++) {
1991                 data_len += osd_req_encode_op(p, &req->r_ops[i]);
1992                 p += sizeof(struct ceph_osd_op);
1993         }
1994
1995         ceph_encode_64(&p, req->r_snapid); /* snapid */
1996         if (req->r_snapc) {
1997                 ceph_encode_64(&p, req->r_snapc->seq);
1998                 ceph_encode_32(&p, req->r_snapc->num_snaps);
1999                 for (i = 0; i < req->r_snapc->num_snaps; i++)
2000                         ceph_encode_64(&p, req->r_snapc->snaps[i]);
2001         } else {
2002                 ceph_encode_64(&p, 0); /* snap_seq */
2003                 ceph_encode_32(&p, 0); /* snaps len */
2004         }
2005
2006         ceph_encode_32(&p, req->r_attempts); /* retry_attempt */
2007         BUG_ON(p > end - 8); /* space for features */
2008
2009         msg->hdr.version = cpu_to_le16(8); /* MOSDOp v8 */
2010         /* front_len is finalized in encode_request_finish() */
2011         msg->front.iov_len = p - msg->front.iov_base;
2012         msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
2013         msg->hdr.data_len = cpu_to_le32(data_len);
2014         /*
2015          * The header "data_off" is a hint to the receiver allowing it
2016          * to align received data into its buffers such that there's no
2017          * need to re-copy it before writing it to disk (direct I/O).
2018          */
2019         msg->hdr.data_off = cpu_to_le16(req->r_data_offset);
2020
2021         dout("%s req %p msg %p oid %s oid_len %d\n", __func__, req, msg,
2022              req->r_t.target_oid.name, req->r_t.target_oid.name_len);
2023 }
2024
2025 static void encode_request_finish(struct ceph_msg *msg)
2026 {
2027         void *p = msg->front.iov_base;
2028         void *const partial_end = p + msg->front.iov_len;
2029         void *const end = p + msg->front_alloc_len;
2030
2031         if (CEPH_HAVE_FEATURE(msg->con->peer_features, RESEND_ON_SPLIT)) {
2032                 /* luminous OSD -- encode features and be done */
2033                 p = partial_end;
2034                 ceph_encode_64(&p, msg->con->peer_features);
2035         } else {
2036                 struct {
2037                         char spgid[CEPH_ENCODING_START_BLK_LEN +
2038                                    CEPH_PGID_ENCODING_LEN + 1];
2039                         __le32 hash;
2040                         __le32 epoch;
2041                         __le32 flags;
2042                         char reqid[CEPH_ENCODING_START_BLK_LEN +
2043                                    sizeof(struct ceph_osd_reqid)];
2044                         char trace[sizeof(struct ceph_blkin_trace_info)];
2045                         __le32 client_inc;
2046                         struct ceph_timespec mtime;
2047                 } __packed head;
2048                 struct ceph_pg pgid;
2049                 void *oloc, *oid, *tail;
2050                 int oloc_len, oid_len, tail_len;
2051                 int len;
2052
2053                 /*
2054                  * Pre-luminous OSD -- reencode v8 into v4 using @head
2055                  * as a temporary buffer.  Encode the raw PG; the rest
2056                  * is just a matter of moving oloc, oid and tail blobs
2057                  * around.
2058                  */
2059                 memcpy(&head, p, sizeof(head));
2060                 p += sizeof(head);
2061
2062                 oloc = p;
2063                 p += CEPH_ENCODING_START_BLK_LEN;
2064                 pgid.pool = ceph_decode_64(&p);
2065                 p += 4 + 4; /* preferred, key len */
2066                 len = ceph_decode_32(&p);
2067                 p += len;   /* nspace */
2068                 oloc_len = p - oloc;
2069
2070                 oid = p;
2071                 len = ceph_decode_32(&p);
2072                 p += len;
2073                 oid_len = p - oid;
2074
2075                 tail = p;
2076                 tail_len = partial_end - p;
2077
2078                 p = msg->front.iov_base;
2079                 ceph_encode_copy(&p, &head.client_inc, sizeof(head.client_inc));
2080                 ceph_encode_copy(&p, &head.epoch, sizeof(head.epoch));
2081                 ceph_encode_copy(&p, &head.flags, sizeof(head.flags));
2082                 ceph_encode_copy(&p, &head.mtime, sizeof(head.mtime));
2083
2084                 /* reassert_version */
2085                 memset(p, 0, sizeof(struct ceph_eversion));
2086                 p += sizeof(struct ceph_eversion);
2087
2088                 BUG_ON(p >= oloc);
2089                 memmove(p, oloc, oloc_len);
2090                 p += oloc_len;
2091
2092                 pgid.seed = le32_to_cpu(head.hash);
2093                 encode_pgid(&p, &pgid); /* raw pg */
2094
2095                 BUG_ON(p >= oid);
2096                 memmove(p, oid, oid_len);
2097                 p += oid_len;
2098
2099                 /* tail -- ops, snapid, snapc, retry_attempt */
2100                 BUG_ON(p >= tail);
2101                 memmove(p, tail, tail_len);
2102                 p += tail_len;
2103
2104                 msg->hdr.version = cpu_to_le16(4); /* MOSDOp v4 */
2105         }
2106
2107         BUG_ON(p > end);
2108         msg->front.iov_len = p - msg->front.iov_base;
2109         msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
2110
2111         dout("%s msg %p tid %llu %u+%u+%u v%d\n", __func__, msg,
2112              le64_to_cpu(msg->hdr.tid), le32_to_cpu(msg->hdr.front_len),
2113              le32_to_cpu(msg->hdr.middle_len), le32_to_cpu(msg->hdr.data_len),
2114              le16_to_cpu(msg->hdr.version));
2115 }
2116
2117 /*
2118  * @req has to be assigned a tid and registered.
2119  */
2120 static void send_request(struct ceph_osd_request *req)
2121 {
2122         struct ceph_osd *osd = req->r_osd;
2123
2124         verify_osd_locked(osd);
2125         WARN_ON(osd->o_osd != req->r_t.osd);
2126
2127         /* backoff? */
2128         if (should_plug_request(req))
2129                 return;
2130
2131         /*
2132          * We may have a previously queued request message hanging
2133          * around.  Cancel it to avoid corrupting the msgr.
2134          */
2135         if (req->r_sent)
2136                 ceph_msg_revoke(req->r_request);
2137
2138         req->r_flags |= CEPH_OSD_FLAG_KNOWN_REDIR;
2139         if (req->r_attempts)
2140                 req->r_flags |= CEPH_OSD_FLAG_RETRY;
2141         else
2142                 WARN_ON(req->r_flags & CEPH_OSD_FLAG_RETRY);
2143
2144         encode_request_partial(req, req->r_request);
2145
2146         dout("%s req %p tid %llu to pgid %llu.%x spgid %llu.%xs%d osd%d e%u flags 0x%x attempt %d\n",
2147              __func__, req, req->r_tid, req->r_t.pgid.pool, req->r_t.pgid.seed,
2148              req->r_t.spgid.pgid.pool, req->r_t.spgid.pgid.seed,
2149              req->r_t.spgid.shard, osd->o_osd, req->r_t.epoch, req->r_flags,
2150              req->r_attempts);
2151
2152         req->r_t.paused = false;
2153         req->r_stamp = jiffies;
2154         req->r_attempts++;
2155
2156         req->r_sent = osd->o_incarnation;
2157         req->r_request->hdr.tid = cpu_to_le64(req->r_tid);
2158         ceph_con_send(&osd->o_con, ceph_msg_get(req->r_request));
2159 }
2160
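/*
 * Subscribe to the next osdmap epoch from the monitor.  The
 * subscription is continuous while a cluster-wide FULL/PAUSERD/PAUSEWR
 * flag is set, one-shot otherwise.
 */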
2161 static void maybe_request_map(struct ceph_osd_client *osdc)
2162 {
2163         bool continuous = false;
2164
2165         verify_osdc_locked(osdc);
2166         WARN_ON(!osdc->osdmap->epoch);
2167
2168         if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
2169             ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD) ||
2170             ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) {
2171                 dout("%s osdc %p continuous\n", __func__, osdc);
2172                 continuous = true;
2173         } else {
2174                 dout("%s osdc %p onetime\n", __func__, osdc);
2175         }
2176
2177         if (ceph_monc_want_map(&osdc->client->monc, CEPH_SUB_OSDMAP,
2178                                osdc->osdmap->epoch + 1, continuous))
2179                 ceph_monc_renew_subs(&osdc->client->monc);
2180 }
2181
2182 static void complete_request(struct ceph_osd_request *req, int err);
2183 static void send_map_check(struct ceph_osd_request *req);
2184
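/*
 * Map @req onto an OSD session and send it, or leave it paused if the
 * osdmap says we can't proceed (epoch barrier, pause/full flags).
 * Called with osdc->lock held as indicated by @wrlocked; a read lock is
 * promoted to a write lock if the pool doesn't exist or an OSD session
 * has to be created.
 */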
2185 static void __submit_request(struct ceph_osd_request *req, bool wrlocked)
2186 {
2187         struct ceph_osd_client *osdc = req->r_osdc;
2188         struct ceph_osd *osd;
2189         enum calc_target_result ct_res;
2190         int err = 0;
2191         bool need_send = false;
2192         bool promoted = false;
2193
2194         WARN_ON(req->r_tid);
2195         dout("%s req %p wrlocked %d\n", __func__, req, wrlocked);
2196
2197 again:
2198         ct_res = calc_target(osdc, &req->r_t, NULL, false);
2199         if (ct_res == CALC_TARGET_POOL_DNE && !wrlocked)
2200                 goto promote;
2201
2202         osd = lookup_create_osd(osdc, req->r_t.osd, wrlocked);
2203         if (IS_ERR(osd)) {
2204                 WARN_ON(PTR_ERR(osd) != -EAGAIN || wrlocked);
2205                 goto promote;
2206         }
2207
2208         if (osdc->abort_err) {
2209                 dout("req %p abort_err %d\n", req, osdc->abort_err);
2210                 err = osdc->abort_err;
2211         } else if (osdc->osdmap->epoch < osdc->epoch_barrier) {
2212                 dout("req %p epoch %u barrier %u\n", req, osdc->osdmap->epoch,
2213                      osdc->epoch_barrier);
2214                 req->r_t.paused = true;
2215                 maybe_request_map(osdc);
2216         } else if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
2217                    ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) {
2218                 dout("req %p pausewr\n", req);
2219                 req->r_t.paused = true;
2220                 maybe_request_map(osdc);
2221         } else if ((req->r_flags & CEPH_OSD_FLAG_READ) &&
2222                    ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) {
2223                 dout("req %p pauserd\n", req);
2224                 req->r_t.paused = true;
2225                 maybe_request_map(osdc);
2226         } else if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
2227                    !(req->r_flags & (CEPH_OSD_FLAG_FULL_TRY |
2228                                      CEPH_OSD_FLAG_FULL_FORCE)) &&
2229                    (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
2230                     pool_full(osdc, req->r_t.base_oloc.pool))) {
2231                 dout("req %p full/pool_full\n", req);
2232                 if (osdc->abort_on_full) {
2233                         err = -ENOSPC;
2234                 } else {
2235                         pr_warn_ratelimited("FULL or reached pool quota\n");
2236                         req->r_t.paused = true;
2237                         maybe_request_map(osdc);
2238                 }
2239         } else if (!osd_homeless(osd)) {
2240                 need_send = true;
2241         } else {
2242                 maybe_request_map(osdc);
2243         }
2244
2245         mutex_lock(&osd->lock);
2246         /*
2247          * Assign the tid atomically with send_request() to protect
2248          * multiple writes to the same object from racing with each
2249          * other, which would result in out-of-order ops on the OSDs.
2250          */
2251         req->r_tid = atomic64_inc_return(&osdc->last_tid);
2252         link_request(osd, req);
2253         if (need_send)
2254                 send_request(req);
2255         else if (err)
2256                 complete_request(req, err);
2257         mutex_unlock(&osd->lock);
2258
2259         if (!err && ct_res == CALC_TARGET_POOL_DNE)
2260                 send_map_check(req);
2261
2262         if (promoted)
2263                 downgrade_write(&osdc->lock);
2264         return;
2265
2266 promote:
2267         up_read(&osdc->lock);
2268         down_write(&osdc->lock);
2269         wrlocked = true;
2270         promoted = true;
2271         goto again;
2272 }
2273
2274 static void account_request(struct ceph_osd_request *req)
2275 {
2276         WARN_ON(req->r_flags & (CEPH_OSD_FLAG_ACK | CEPH_OSD_FLAG_ONDISK));
2277         WARN_ON(!(req->r_flags & (CEPH_OSD_FLAG_READ | CEPH_OSD_FLAG_WRITE)));
2278
2279         req->r_flags |= CEPH_OSD_FLAG_ONDISK;
2280         atomic_inc(&req->r_osdc->num_requests);
2281
2282         req->r_start_stamp = jiffies;
2283 }
2284
2285 static void submit_request(struct ceph_osd_request *req, bool wrlocked)
2286 {
2287         ceph_osdc_get_request(req);
2288         account_request(req);
2289         __submit_request(req, wrlocked);
2290 }
2291
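/*
 * Unlink @req from its OSD session and drop it from the request
 * accounting; revoke the request and reply messages in case they are
 * still queued in the messenger.
 */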
2292 static void finish_request(struct ceph_osd_request *req)
2293 {
2294         struct ceph_osd_client *osdc = req->r_osdc;
2295
2296         WARN_ON(lookup_request_mc(&osdc->map_checks, req->r_tid));
2297         dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
2298
2299         if (req->r_osd)
2300                 unlink_request(req->r_osd, req);
2301         atomic_dec(&osdc->num_requests);
2302
2303         /*
2304          * If an OSD has failed or returned and a request has been sent
2305          * twice, it's possible to get a reply and end up here while the
2306          * request message is queued for delivery.  We will ignore the
2307          * reply, so not a big deal, but better to try and catch it.
2308          */
2309         ceph_msg_revoke(req->r_request);
2310         ceph_msg_revoke_incoming(req->r_reply);
2311 }
2312
2313 static void __complete_request(struct ceph_osd_request *req)
2314 {
2315         dout("%s req %p tid %llu cb %pf result %d\n", __func__, req,
2316              req->r_tid, req->r_callback, req->r_result);
2317
2318         if (req->r_callback)
2319                 req->r_callback(req);
2320         complete_all(&req->r_completion);
2321         ceph_osdc_put_request(req);
2322 }
2323
2324 static void complete_request_workfn(struct work_struct *work)
2325 {
2326         struct ceph_osd_request *req =
2327             container_of(work, struct ceph_osd_request, r_complete_work);
2328
2329         __complete_request(req);
2330 }
2331
2332 /*
2333  * This is open-coded in handle_reply().
2334  */
2335 static void complete_request(struct ceph_osd_request *req, int err)
2336 {
2337         dout("%s req %p tid %llu err %d\n", __func__, req, req->r_tid, err);
2338
2339         req->r_result = err;
2340         finish_request(req);
2341
2342         INIT_WORK(&req->r_complete_work, complete_request_workfn);
2343         queue_work(req->r_osdc->completion_wq, &req->r_complete_work);
2344 }
2345
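/*
 * Drop @req from the map check tree (requests waiting on the newest
 * osdmap version to decide whether their pool is gone), if it's there.
 */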
2346 static void cancel_map_check(struct ceph_osd_request *req)
2347 {
2348         struct ceph_osd_client *osdc = req->r_osdc;
2349         struct ceph_osd_request *lookup_req;
2350
2351         verify_osdc_wrlocked(osdc);
2352
2353         lookup_req = lookup_request_mc(&osdc->map_checks, req->r_tid);
2354         if (!lookup_req)
2355                 return;
2356
2357         WARN_ON(lookup_req != req);
2358         erase_request_mc(&osdc->map_checks, req);
2359         ceph_osdc_put_request(req);
2360 }
2361
2362 static void cancel_request(struct ceph_osd_request *req)
2363 {
2364         dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
2365
2366         cancel_map_check(req);
2367         finish_request(req);
2368         complete_all(&req->r_completion);
2369         ceph_osdc_put_request(req);
2370 }
2371
2372 static void abort_request(struct ceph_osd_request *req, int err)
2373 {
2374         dout("%s req %p tid %llu err %d\n", __func__, req, req->r_tid, err);
2375
2376         cancel_map_check(req);
2377         complete_request(req, err);
2378 }
2379
2380 static int abort_fn(struct ceph_osd_request *req, void *arg)
2381 {
2382         int err = *(int *)arg;
2383
2384         abort_request(req, err);
2385         return 0; /* continue iteration */
2386 }
2387
2388 /*
2389  * Abort all in-flight requests with @err and arrange for all future
2390  * requests to be failed immediately.
2391  */
2392 void ceph_osdc_abort_requests(struct ceph_osd_client *osdc, int err)
2393 {
2394         dout("%s osdc %p err %d\n", __func__, osdc, err);
2395         down_write(&osdc->lock);
2396         for_each_request(osdc, abort_fn, &err);
2397         osdc->abort_err = err;
2398         up_write(&osdc->lock);
2399 }
2400 EXPORT_SYMBOL(ceph_osdc_abort_requests);
2401
2402 static void update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb)
2403 {
2404         if (likely(eb > osdc->epoch_barrier)) {
2405                 dout("updating epoch_barrier from %u to %u\n",
2406                                 osdc->epoch_barrier, eb);
2407                 osdc->epoch_barrier = eb;
2408                 /* Request map if we're not to the barrier yet */
2409                 if (eb > osdc->osdmap->epoch)
2410                         maybe_request_map(osdc);
2411         }
2412 }
2413
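/*
 * Raise the epoch barrier to @eb if it is newer, taking osdc->lock for
 * write only when an update is actually needed.
 */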
2414 void ceph_osdc_update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb)
2415 {
2416         down_read(&osdc->lock);
2417         if (unlikely(eb > osdc->epoch_barrier)) {
2418                 up_read(&osdc->lock);
2419                 down_write(&osdc->lock);
2420                 update_epoch_barrier(osdc, eb);
2421                 up_write(&osdc->lock);
2422         } else {
2423                 up_read(&osdc->lock);
2424         }
2425 }
2426 EXPORT_SYMBOL(ceph_osdc_update_epoch_barrier);
2427
2428 /*
2429  * We can end up releasing caps as a result of abort_request().
2430  * In that case, we probably want to ensure that the cap release message
2431  * has an updated epoch barrier in it, so set the epoch barrier prior to
2432  * aborting the first request.
2433  */
2434 static int abort_on_full_fn(struct ceph_osd_request *req, void *arg)
2435 {
2436         struct ceph_osd_client *osdc = req->r_osdc;
2437         bool *victims = arg;
2438
2439         if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
2440             (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
2441              pool_full(osdc, req->r_t.base_oloc.pool))) {
2442                 if (!*victims) {
2443                         update_epoch_barrier(osdc, osdc->osdmap->epoch);
2444                         *victims = true;
2445                 }
2446                 abort_request(req, -ENOSPC);
2447         }
2448
2449         return 0; /* continue iteration */
2450 }
2451
2452 /*
2453  * Drop all pending requests that are stalled waiting on a full condition to
2454  * clear, and complete them with -ENOSPC as the return code. Set the
2455  * osdc->epoch_barrier to the latest map epoch that we've seen if any were
2456  * cancelled.
2457  */
2458 static void ceph_osdc_abort_on_full(struct ceph_osd_client *osdc)
2459 {
2460         bool victims = false;
2461
2462         if (osdc->abort_on_full &&
2463             (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) || have_pool_full(osdc)))
2464                 for_each_request(osdc, abort_on_full_fn, &victims);
2465 }
2466
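/*
 * Decide whether the target pool is really gone: if our map is at
 * least as new as r_map_dne_bound, fail the request with -ENOENT;
 * otherwise ask the monitor for the newest osdmap version first.
 */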
2467 static void check_pool_dne(struct ceph_osd_request *req)
2468 {
2469         struct ceph_osd_client *osdc = req->r_osdc;
2470         struct ceph_osdmap *map = osdc->osdmap;
2471
2472         verify_osdc_wrlocked(osdc);
2473         WARN_ON(!map->epoch);
2474
2475         if (req->r_attempts) {
2476                 /*
2477                  * We sent a request earlier, which means that
2478                  * previously the pool existed, and now it does not
2479                  * (i.e., it was deleted).
2480                  */
2481                 req->r_map_dne_bound = map->epoch;
2482                 dout("%s req %p tid %llu pool disappeared\n", __func__, req,
2483                      req->r_tid);
2484         } else {
2485                 dout("%s req %p tid %llu map_dne_bound %u have %u\n", __func__,
2486                      req, req->r_tid, req->r_map_dne_bound, map->epoch);
2487         }
2488
2489         if (req->r_map_dne_bound) {
2490                 if (map->epoch >= req->r_map_dne_bound) {
2491                         /* we had a new enough map */
2492                         pr_info_ratelimited("tid %llu pool does not exist\n",
2493                                             req->r_tid);
2494                         complete_request(req, -ENOENT);
2495                 }
2496         } else {
2497                 send_map_check(req);
2498         }
2499 }
2500
2501 static void map_check_cb(struct ceph_mon_generic_request *greq)
2502 {
2503         struct ceph_osd_client *osdc = &greq->monc->client->osdc;
2504         struct ceph_osd_request *req;
2505         u64 tid = greq->private_data;
2506
2507         WARN_ON(greq->result || !greq->u.newest);
2508
2509         down_write(&osdc->lock);
2510         req = lookup_request_mc(&osdc->map_checks, tid);
2511         if (!req) {
2512                 dout("%s tid %llu dne\n", __func__, tid);
2513                 goto out_unlock;
2514         }
2515
2516         dout("%s req %p tid %llu map_dne_bound %u newest %llu\n", __func__,
2517              req, req->r_tid, req->r_map_dne_bound, greq->u.newest);
2518         if (!req->r_map_dne_bound)
2519                 req->r_map_dne_bound = greq->u.newest;
2520         erase_request_mc(&osdc->map_checks, req);
2521         check_pool_dne(req);
2522
2523         ceph_osdc_put_request(req);
2524 out_unlock:
2525         up_write(&osdc->lock);
2526 }
2527
2528 static void send_map_check(struct ceph_osd_request *req)
2529 {
2530         struct ceph_osd_client *osdc = req->r_osdc;
2531         struct ceph_osd_request *lookup_req;
2532         int ret;
2533
2534         verify_osdc_wrlocked(osdc);
2535
2536         lookup_req = lookup_request_mc(&osdc->map_checks, req->r_tid);
2537         if (lookup_req) {
2538                 WARN_ON(lookup_req != req);
2539                 return;
2540         }
2541
2542         ceph_osdc_get_request(req);
2543         insert_request_mc(&osdc->map_checks, req);
2544         ret = ceph_monc_get_version_async(&osdc->client->monc, "osdmap",
2545                                           map_check_cb, req->r_tid);
2546         WARN_ON(ret);
2547 }
2548
2549 /*
2550  * lingering requests, watch/notify v2 infrastructure
2551  */
2552 static void linger_release(struct kref *kref)
2553 {
2554         struct ceph_osd_linger_request *lreq =
2555             container_of(kref, struct ceph_osd_linger_request, kref);
2556
2557         dout("%s lreq %p reg_req %p ping_req %p\n", __func__, lreq,
2558              lreq->reg_req, lreq->ping_req);
2559         WARN_ON(!RB_EMPTY_NODE(&lreq->node));
2560         WARN_ON(!RB_EMPTY_NODE(&lreq->osdc_node));
2561         WARN_ON(!RB_EMPTY_NODE(&lreq->mc_node));
2562         WARN_ON(!list_empty(&lreq->scan_item));
2563         WARN_ON(!list_empty(&lreq->pending_lworks));
2564         WARN_ON(lreq->osd);
2565
2566         if (lreq->reg_req)
2567                 ceph_osdc_put_request(lreq->reg_req);
2568         if (lreq->ping_req)
2569                 ceph_osdc_put_request(lreq->ping_req);
2570         target_destroy(&lreq->t);
2571         kfree(lreq);
2572 }
2573
2574 static void linger_put(struct ceph_osd_linger_request *lreq)
2575 {
2576         if (lreq)
2577                 kref_put(&lreq->kref, linger_release);
2578 }
2579
2580 static struct ceph_osd_linger_request *
2581 linger_get(struct ceph_osd_linger_request *lreq)
2582 {
2583         kref_get(&lreq->kref);
2584         return lreq;
2585 }
2586
2587 static struct ceph_osd_linger_request *
2588 linger_alloc(struct ceph_osd_client *osdc)
2589 {
2590         struct ceph_osd_linger_request *lreq;
2591
2592         lreq = kzalloc(sizeof(*lreq), GFP_NOIO);
2593         if (!lreq)
2594                 return NULL;
2595
2596         kref_init(&lreq->kref);
2597         mutex_init(&lreq->lock);
2598         RB_CLEAR_NODE(&lreq->node);
2599         RB_CLEAR_NODE(&lreq->osdc_node);
2600         RB_CLEAR_NODE(&lreq->mc_node);
2601         INIT_LIST_HEAD(&lreq->scan_item);
2602         INIT_LIST_HEAD(&lreq->pending_lworks);
2603         init_completion(&lreq->reg_commit_wait);
2604         init_completion(&lreq->notify_finish_wait);
2605
2606         lreq->osdc = osdc;
2607         target_init(&lreq->t);
2608
2609         dout("%s lreq %p\n", __func__, lreq);
2610         return lreq;
2611 }
2612
2613 DEFINE_RB_INSDEL_FUNCS(linger, struct ceph_osd_linger_request, linger_id, node)
2614 DEFINE_RB_FUNCS(linger_osdc, struct ceph_osd_linger_request, linger_id, osdc_node)
2615 DEFINE_RB_FUNCS(linger_mc, struct ceph_osd_linger_request, linger_id, mc_node)
2616
2617 /*
2618  * Create linger request <-> OSD session relation.
2619  *
2620  * @lreq has to be registered, @osd may be homeless.
2621  */
2622 static void link_linger(struct ceph_osd *osd,
2623                         struct ceph_osd_linger_request *lreq)
2624 {
2625         verify_osd_locked(osd);
2626         WARN_ON(!lreq->linger_id || lreq->osd);
2627         dout("%s osd %p osd%d lreq %p linger_id %llu\n", __func__, osd,
2628              osd->o_osd, lreq, lreq->linger_id);
2629
2630         if (!osd_homeless(osd))
2631                 __remove_osd_from_lru(osd);
2632         else
2633                 atomic_inc(&osd->o_osdc->num_homeless);
2634
2635         get_osd(osd);
2636         insert_linger(&osd->o_linger_requests, lreq);
2637         lreq->osd = osd;
2638 }
2639
2640 static void unlink_linger(struct ceph_osd *osd,
2641                           struct ceph_osd_linger_request *lreq)
2642 {
2643         verify_osd_locked(osd);
2644         WARN_ON(lreq->osd != osd);
2645         dout("%s osd %p osd%d lreq %p linger_id %llu\n", __func__, osd,
2646              osd->o_osd, lreq, lreq->linger_id);
2647
2648         lreq->osd = NULL;
2649         erase_linger(&osd->o_linger_requests, lreq);
2650         put_osd(osd);
2651
2652         if (!osd_homeless(osd))
2653                 maybe_move_osd_to_lru(osd);
2654         else
2655                 atomic_dec(&osd->o_osdc->num_homeless);
2656 }
2657
2658 static bool __linger_registered(struct ceph_osd_linger_request *lreq)
2659 {
2660         verify_osdc_locked(lreq->osdc);
2661
2662         return !RB_EMPTY_NODE(&lreq->osdc_node);
2663 }
2664
2665 static bool linger_registered(struct ceph_osd_linger_request *lreq)
2666 {
2667         struct ceph_osd_client *osdc = lreq->osdc;
2668         bool registered;
2669
2670         down_read(&osdc->lock);
2671         registered = __linger_registered(lreq);
2672         up_read(&osdc->lock);
2673
2674         return registered;
2675 }
2676
2677 static void linger_register(struct ceph_osd_linger_request *lreq)
2678 {
2679         struct ceph_osd_client *osdc = lreq->osdc;
2680
2681         verify_osdc_wrlocked(osdc);
2682         WARN_ON(lreq->linger_id);
2683
2684         linger_get(lreq);
2685         lreq->linger_id = ++osdc->last_linger_id;
2686         insert_linger_osdc(&osdc->linger_requests, lreq);
2687 }
2688
2689 static void linger_unregister(struct ceph_osd_linger_request *lreq)
2690 {
2691         struct ceph_osd_client *osdc = lreq->osdc;
2692
2693         verify_osdc_wrlocked(osdc);
2694
2695         erase_linger_osdc(&osdc->linger_requests, lreq);
2696         linger_put(lreq);
2697 }
2698
2699 static void cancel_linger_request(struct ceph_osd_request *req)
2700 {
2701         struct ceph_osd_linger_request *lreq = req->r_priv;
2702
2703         WARN_ON(!req->r_linger);
2704         cancel_request(req);
2705         linger_put(lreq);
2706 }
2707
2708 struct linger_work {
2709         struct work_struct work;
2710         struct ceph_osd_linger_request *lreq;
2711         struct list_head pending_item;
2712         unsigned long queued_stamp;
2713
2714         union {
2715                 struct {
2716                         u64 notify_id;
2717                         u64 notifier_id;
2718                         void *payload; /* points into @msg front */
2719                         size_t payload_len;
2720
2721                         struct ceph_msg *msg; /* for ceph_msg_put() */
2722                 } notify;
2723                 struct {
2724                         int err;
2725                 } error;
2726         };
2727 };
2728
2729 static struct linger_work *lwork_alloc(struct ceph_osd_linger_request *lreq,
2730                                        work_func_t workfn)
2731 {
2732         struct linger_work *lwork;
2733
2734         lwork = kzalloc(sizeof(*lwork), GFP_NOIO);
2735         if (!lwork)
2736                 return NULL;
2737
2738         INIT_WORK(&lwork->work, workfn);
2739         INIT_LIST_HEAD(&lwork->pending_item);
2740         lwork->lreq = linger_get(lreq);
2741
2742         return lwork;
2743 }
2744
2745 static void lwork_free(struct linger_work *lwork)
2746 {
2747         struct ceph_osd_linger_request *lreq = lwork->lreq;
2748
2749         mutex_lock(&lreq->lock);
2750         list_del(&lwork->pending_item);
2751         mutex_unlock(&lreq->lock);
2752
2753         linger_put(lreq);
2754         kfree(lwork);
2755 }
2756
2757 static void lwork_queue(struct linger_work *lwork)
2758 {
2759         struct ceph_osd_linger_request *lreq = lwork->lreq;
2760         struct ceph_osd_client *osdc = lreq->osdc;
2761
2762         verify_lreq_locked(lreq);
2763         WARN_ON(!list_empty(&lwork->pending_item));
2764
2765         lwork->queued_stamp = jiffies;
2766         list_add_tail(&lwork->pending_item, &lreq->pending_lworks);
2767         queue_work(osdc->notify_wq, &lwork->work);
2768 }
2769
2770 static void do_watch_notify(struct work_struct *w)
2771 {
2772         struct linger_work *lwork = container_of(w, struct linger_work, work);
2773         struct ceph_osd_linger_request *lreq = lwork->lreq;
2774
2775         if (!linger_registered(lreq)) {
2776                 dout("%s lreq %p not registered\n", __func__, lreq);
2777                 goto out;
2778         }
2779
2780         WARN_ON(!lreq->is_watch);
2781         dout("%s lreq %p notify_id %llu notifier_id %llu payload_len %zu\n",
2782              __func__, lreq, lwork->notify.notify_id, lwork->notify.notifier_id,
2783              lwork->notify.payload_len);
2784         lreq->wcb(lreq->data, lwork->notify.notify_id, lreq->linger_id,
2785                   lwork->notify.notifier_id, lwork->notify.payload,
2786                   lwork->notify.payload_len);
2787
2788 out:
2789         ceph_msg_put(lwork->notify.msg);
2790         lwork_free(lwork);
2791 }
2792
2793 static void do_watch_error(struct work_struct *w)
2794 {
2795         struct linger_work *lwork = container_of(w, struct linger_work, work);
2796         struct ceph_osd_linger_request *lreq = lwork->lreq;
2797
2798         if (!linger_registered(lreq)) {
2799                 dout("%s lreq %p not registered\n", __func__, lreq);
2800                 goto out;
2801         }
2802
2803         dout("%s lreq %p err %d\n", __func__, lreq, lwork->error.err);
2804         lreq->errcb(lreq->data, lreq->linger_id, lwork->error.err);
2805
2806 out:
2807         lwork_free(lwork);
2808 }
2809
2810 static void queue_watch_error(struct ceph_osd_linger_request *lreq)
2811 {
2812         struct linger_work *lwork;
2813
2814         lwork = lwork_alloc(lreq, do_watch_error);
2815         if (!lwork) {
2816                 pr_err("failed to allocate error-lwork\n");
2817                 return;
2818         }
2819
2820         lwork->error.err = lreq->last_error;
2821         lwork_queue(lwork);
2822 }
2823
2824 static void linger_reg_commit_complete(struct ceph_osd_linger_request *lreq,
2825                                        int result)
2826 {
2827         if (!completion_done(&lreq->reg_commit_wait)) {
2828                 lreq->reg_commit_error = (result <= 0 ? result : 0);
2829                 complete_all(&lreq->reg_commit_wait);
2830         }
2831 }
2832
2833 static void linger_commit_cb(struct ceph_osd_request *req)
2834 {
2835         struct ceph_osd_linger_request *lreq = req->r_priv;
2836
2837         mutex_lock(&lreq->lock);
2838         dout("%s lreq %p linger_id %llu result %d\n", __func__, lreq,
2839              lreq->linger_id, req->r_result);
2840         linger_reg_commit_complete(lreq, req->r_result);
2841         lreq->committed = true;
2842
2843         if (!lreq->is_watch) {
2844                 struct ceph_osd_data *osd_data =
2845                     osd_req_op_data(req, 0, notify, response_data);
2846                 void *p = page_address(osd_data->pages[0]);
2847
2848                 WARN_ON(req->r_ops[0].op != CEPH_OSD_OP_NOTIFY ||
2849                         osd_data->type != CEPH_OSD_DATA_TYPE_PAGES);
2850
2851                 /* make note of the notify_id */
2852                 if (req->r_ops[0].outdata_len >= sizeof(u64)) {
2853                         lreq->notify_id = ceph_decode_64(&p);
2854                         dout("lreq %p notify_id %llu\n", lreq,
2855                              lreq->notify_id);
2856                 } else {
2857                         dout("lreq %p no notify_id\n", lreq);
2858                 }
2859         }
2860
2861         mutex_unlock(&lreq->lock);
2862         linger_put(lreq);
2863 }
2864
2865 static int normalize_watch_error(int err)
2866 {
2867         /*
2868          * Translate ENOENT -> ENOTCONN so that a delete->disconnection
2869          * notification and a failure to reconnect because we raced with
2870          * the delete appear the same to the user.
2871          */
2872         if (err == -ENOENT)
2873                 err = -ENOTCONN;
2874
2875         return err;
2876 }
2877
2878 static void linger_reconnect_cb(struct ceph_osd_request *req)
2879 {
2880         struct ceph_osd_linger_request *lreq = req->r_priv;
2881
2882         mutex_lock(&lreq->lock);
2883         dout("%s lreq %p linger_id %llu result %d last_error %d\n", __func__,
2884              lreq, lreq->linger_id, req->r_result, lreq->last_error);
2885         if (req->r_result < 0) {
2886                 if (!lreq->last_error) {
2887                         lreq->last_error = normalize_watch_error(req->r_result);
2888                         queue_watch_error(lreq);
2889                 }
2890         }
2891
2892         mutex_unlock(&lreq->lock);
2893         linger_put(lreq);
2894 }
2895
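/*
 * (Re)send the registration request for a linger (watch/notify)
 * operation: a fresh watch/notify if not yet committed, a watch
 * reconnect otherwise.
 */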
2896 static void send_linger(struct ceph_osd_linger_request *lreq)
2897 {
2898         struct ceph_osd_request *req = lreq->reg_req;
2899         struct ceph_osd_req_op *op = &req->r_ops[0];
2900
2901         verify_osdc_wrlocked(req->r_osdc);
2902         dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
2903
2904         if (req->r_osd)
2905                 cancel_linger_request(req);
2906
2907         request_reinit(req);
2908         ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
2909         ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
2910         req->r_flags = lreq->t.flags;
2911         req->r_mtime = lreq->mtime;
2912
2913         mutex_lock(&lreq->lock);
2914         if (lreq->is_watch && lreq->committed) {
2915                 WARN_ON(op->op != CEPH_OSD_OP_WATCH ||
2916                         op->watch.cookie != lreq->linger_id);
2917                 op->watch.op = CEPH_OSD_WATCH_OP_RECONNECT;
2918                 op->watch.gen = ++lreq->register_gen;
2919                 dout("lreq %p reconnect register_gen %u\n", lreq,
2920                      op->watch.gen);
2921                 req->r_callback = linger_reconnect_cb;
2922         } else {
2923                 if (!lreq->is_watch)
2924                         lreq->notify_id = 0;
2925                 else
2926                         WARN_ON(op->watch.op != CEPH_OSD_WATCH_OP_WATCH);
2927                 dout("lreq %p register\n", lreq);
2928                 req->r_callback = linger_commit_cb;
2929         }
2930         mutex_unlock(&lreq->lock);
2931
2932         req->r_priv = linger_get(lreq);
2933         req->r_linger = true;
2934
2935         submit_request(req, true);
2936 }
2937
2938 static void linger_ping_cb(struct ceph_osd_request *req)
2939 {
2940         struct ceph_osd_linger_request *lreq = req->r_priv;
2941
2942         mutex_lock(&lreq->lock);
2943         dout("%s lreq %p linger_id %llu result %d ping_sent %lu last_error %d\n",
2944              __func__, lreq, lreq->linger_id, req->r_result, lreq->ping_sent,
2945              lreq->last_error);
2946         if (lreq->register_gen == req->r_ops[0].watch.gen) {
2947                 if (!req->r_result) {
2948                         lreq->watch_valid_thru = lreq->ping_sent;
2949                 } else if (!lreq->last_error) {
2950                         lreq->last_error = normalize_watch_error(req->r_result);
2951                         queue_watch_error(lreq);
2952                 }
2953         } else {
2954                 dout("lreq %p register_gen %u ignoring old pong %u\n", lreq,
2955                      lreq->register_gen, req->r_ops[0].watch.gen);
2956         }
2957
2958         mutex_unlock(&lreq->lock);
2959         linger_put(lreq);
2960 }
2961
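/*
 * Periodically ping an established watch with CEPH_OSD_WATCH_OP_PING so
 * that a silently broken session is noticed.  A successful pong for the
 * current register_gen advances watch_valid_thru; an error is reported
 * once through queue_watch_error().  Pings are skipped while the osdmap
 * has PAUSERD set.
 */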
2962 static void send_linger_ping(struct ceph_osd_linger_request *lreq)
2963 {
2964         struct ceph_osd_client *osdc = lreq->osdc;
2965         struct ceph_osd_request *req = lreq->ping_req;
2966         struct ceph_osd_req_op *op = &req->r_ops[0];
2967
2968         if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) {
2969                 dout("%s PAUSERD\n", __func__);
2970                 return;
2971         }
2972
2973         lreq->ping_sent = jiffies;
2974         dout("%s lreq %p linger_id %llu ping_sent %lu register_gen %u\n",
2975              __func__, lreq, lreq->linger_id, lreq->ping_sent,
2976              lreq->register_gen);
2977
2978         if (req->r_osd)
2979                 cancel_linger_request(req);
2980
2981         request_reinit(req);
2982         target_copy(&req->r_t, &lreq->t);
2983
2984         WARN_ON(op->op != CEPH_OSD_OP_WATCH ||
2985                 op->watch.cookie != lreq->linger_id ||
2986                 op->watch.op != CEPH_OSD_WATCH_OP_PING);
2987         op->watch.gen = lreq->register_gen;
2988         req->r_callback = linger_ping_cb;
2989         req->r_priv = linger_get(lreq);
2990         req->r_linger = true;
2991
2992         ceph_osdc_get_request(req);
2993         account_request(req);
2994         req->r_tid = atomic64_inc_return(&osdc->last_tid);
2995         link_request(lreq->osd, req);
2996         send_request(req);
2997 }
2998
2999 static void linger_submit(struct ceph_osd_linger_request *lreq)
3000 {
3001         struct ceph_osd_client *osdc = lreq->osdc;
3002         struct ceph_osd *osd;
3003
3004         calc_target(osdc, &lreq->t, NULL, false);
3005         osd = lookup_create_osd(osdc, lreq->t.osd, true);
3006         link_linger(osd, lreq);
3007
3008         send_linger(lreq);
3009 }
3010
3011 static void cancel_linger_map_check(struct ceph_osd_linger_request *lreq)
3012 {
3013         struct ceph_osd_client *osdc = lreq->osdc;
3014         struct ceph_osd_linger_request *lookup_lreq;
3015
3016         verify_osdc_wrlocked(osdc);
3017
3018         lookup_lreq = lookup_linger_mc(&osdc->linger_map_checks,
3019                                        lreq->linger_id);
3020         if (!lookup_lreq)
3021                 return;
3022
3023         WARN_ON(lookup_lreq != lreq);
3024         erase_linger_mc(&osdc->linger_map_checks, lreq);
3025         linger_put(lreq);
3026 }
3027
3028 /*
3029  * @lreq has to be both registered and linked.
3030  */
3031 static void __linger_cancel(struct ceph_osd_linger_request *lreq)
3032 {
3033         if (lreq->is_watch && lreq->ping_req->r_osd)
3034                 cancel_linger_request(lreq->ping_req);
3035         if (lreq->reg_req->r_osd)
3036                 cancel_linger_request(lreq->reg_req);
3037         cancel_linger_map_check(lreq);
3038         unlink_linger(lreq->osd, lreq);
3039         linger_unregister(lreq);
3040 }
3041
3042 static void linger_cancel(struct ceph_osd_linger_request *lreq)
3043 {
3044         struct ceph_osd_client *osdc = lreq->osdc;
3045
3046         down_write(&osdc->lock);
3047         if (__linger_registered(lreq))
3048                 __linger_cancel(lreq);
3049         up_write(&osdc->lock);
3050 }
3051
3052 static void send_linger_map_check(struct ceph_osd_linger_request *lreq);
3053
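/*
 * Decide whether the linger request's pool has been deleted.  If we
 * already have a new enough map (epoch >= map_dne_bound), fail the
 * registration with -ENOENT and tear the linger request down;
 * otherwise ask the monitor for the newest osdmap epoch via
 * send_linger_map_check() and revisit when the answer arrives.
 */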
3054 static void check_linger_pool_dne(struct ceph_osd_linger_request *lreq)
3055 {
3056         struct ceph_osd_client *osdc = lreq->osdc;
3057         struct ceph_osdmap *map = osdc->osdmap;
3058
3059         verify_osdc_wrlocked(osdc);
3060         WARN_ON(!map->epoch);
3061
3062         if (lreq->register_gen) {
3063                 lreq->map_dne_bound = map->epoch;
3064                 dout("%s lreq %p linger_id %llu pool disappeared\n", __func__,
3065                      lreq, lreq->linger_id);
3066         } else {
3067                 dout("%s lreq %p linger_id %llu map_dne_bound %u have %u\n",
3068                      __func__, lreq, lreq->linger_id, lreq->map_dne_bound,
3069                      map->epoch);
3070         }
3071
3072         if (lreq->map_dne_bound) {
3073                 if (map->epoch >= lreq->map_dne_bound) {
3074                         /* we had a new enough map */
3075                         pr_info("linger_id %llu pool does not exist\n",
3076                                 lreq->linger_id);
3077                         linger_reg_commit_complete(lreq, -ENOENT);
3078                         __linger_cancel(lreq);
3079                 }
3080         } else {
3081                 send_linger_map_check(lreq);
3082         }
3083 }
3084
3085 static void linger_map_check_cb(struct ceph_mon_generic_request *greq)
3086 {
3087         struct ceph_osd_client *osdc = &greq->monc->client->osdc;
3088         struct ceph_osd_linger_request *lreq;
3089         u64 linger_id = greq->private_data;
3090
3091         WARN_ON(greq->result || !greq->u.newest);
3092
3093         down_write(&osdc->lock);
3094         lreq = lookup_linger_mc(&osdc->linger_map_checks, linger_id);
3095         if (!lreq) {
3096                 dout("%s linger_id %llu dne\n", __func__, linger_id);
3097                 goto out_unlock;
3098         }
3099
3100         dout("%s lreq %p linger_id %llu map_dne_bound %u newest %llu\n",
3101              __func__, lreq, lreq->linger_id, lreq->map_dne_bound,
3102              greq->u.newest);
3103         if (!lreq->map_dne_bound)
3104                 lreq->map_dne_bound = greq->u.newest;
3105         erase_linger_mc(&osdc->linger_map_checks, lreq);
3106         check_linger_pool_dne(lreq);
3107
3108         linger_put(lreq);
3109 out_unlock:
3110         up_write(&osdc->lock);
3111 }
3112
3113 static void send_linger_map_check(struct ceph_osd_linger_request *lreq)
3114 {
3115         struct ceph_osd_client *osdc = lreq->osdc;
3116         struct ceph_osd_linger_request *lookup_lreq;
3117         int ret;
3118
3119         verify_osdc_wrlocked(osdc);
3120
3121         lookup_lreq = lookup_linger_mc(&osdc->linger_map_checks,
3122                                        lreq->linger_id);
3123         if (lookup_lreq) {
3124                 WARN_ON(lookup_lreq != lreq);
3125                 return;
3126         }
3127
3128         linger_get(lreq);
3129         insert_linger_mc(&osdc->linger_map_checks, lreq);
3130         ret = ceph_monc_get_version_async(&osdc->client->monc, "osdmap",
3131                                           linger_map_check_cb, lreq->linger_id);
3132         WARN_ON(ret);
3133 }
3134
3135 static int linger_reg_commit_wait(struct ceph_osd_linger_request *lreq)
3136 {
3137         int ret;
3138
3139         dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
3140         ret = wait_for_completion_interruptible(&lreq->reg_commit_wait);
3141         return ret ?: lreq->reg_commit_error;
3142 }
3143
3144 static int linger_notify_finish_wait(struct ceph_osd_linger_request *lreq)
3145 {
3146         int ret;
3147
3148         dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
3149         ret = wait_for_completion_interruptible(&lreq->notify_finish_wait);
3150         return ret ?: lreq->notify_finish_error;
3151 }
3152
3153 /*
3154  * Timeout callback, called every N seconds.  When one or more OSD
3155  * requests have been active for more than N seconds, we send a keepalive
3156  * (tag + timestamp) to its OSD to ensure any communications channel
3157  * reset is detected.
3158  */
3159 static void handle_timeout(struct work_struct *work)
3160 {
3161         struct ceph_osd_client *osdc =
3162                 container_of(work, struct ceph_osd_client, timeout_work.work);
3163         struct ceph_options *opts = osdc->client->options;
3164         unsigned long cutoff = jiffies - opts->osd_keepalive_timeout;
3165         unsigned long expiry_cutoff = jiffies - opts->osd_request_timeout;
3166         LIST_HEAD(slow_osds);
3167         struct rb_node *n, *p;
3168
3169         dout("%s osdc %p\n", __func__, osdc);
3170         down_write(&osdc->lock);
3171
3172         /*
3173          * ping osds that are a bit slow.  this ensures that if there
3174          * is a break in the TCP connection we will notice, and reopen
3175          * a connection with that osd (from the fault callback).
3176          */
3177         for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
3178                 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
3179                 bool found = false;
3180
3181                 for (p = rb_first(&osd->o_requests); p; ) {
3182                         struct ceph_osd_request *req =
3183                             rb_entry(p, struct ceph_osd_request, r_node);
3184
3185                         p = rb_next(p); /* abort_request() */
3186
3187                         if (time_before(req->r_stamp, cutoff)) {
3188                                 dout(" req %p tid %llu on osd%d is laggy\n",
3189                                      req, req->r_tid, osd->o_osd);
3190                                 found = true;
3191                         }
3192                         if (opts->osd_request_timeout &&
3193                             time_before(req->r_start_stamp, expiry_cutoff)) {
3194                                 pr_err_ratelimited("tid %llu on osd%d timeout\n",
3195                                        req->r_tid, osd->o_osd);
3196                                 abort_request(req, -ETIMEDOUT);
3197                         }
3198                 }
3199                 for (p = rb_first(&osd->o_linger_requests); p; p = rb_next(p)) {
3200                         struct ceph_osd_linger_request *lreq =
3201                             rb_entry(p, struct ceph_osd_linger_request, node);
3202
3203                         dout(" lreq %p linger_id %llu is served by osd%d\n",
3204                              lreq, lreq->linger_id, osd->o_osd);
3205                         found = true;
3206
3207                         mutex_lock(&lreq->lock);
3208                         if (lreq->is_watch && lreq->committed && !lreq->last_error)
3209                                 send_linger_ping(lreq);
3210                         mutex_unlock(&lreq->lock);
3211                 }
3212
3213                 if (found)
3214                         list_move_tail(&osd->o_keepalive_item, &slow_osds);
3215         }
3216
3217         if (opts->osd_request_timeout) {
3218                 for (p = rb_first(&osdc->homeless_osd.o_requests); p; ) {
3219                         struct ceph_osd_request *req =
3220                             rb_entry(p, struct ceph_osd_request, r_node);
3221
3222                         p = rb_next(p); /* abort_request() */
3223
3224                         if (time_before(req->r_start_stamp, expiry_cutoff)) {
3225                                 pr_err_ratelimited("tid %llu on osd%d timeout\n",
3226                                        req->r_tid, osdc->homeless_osd.o_osd);
3227                                 abort_request(req, -ETIMEDOUT);
3228                         }
3229                 }
3230         }
3231
3232         if (atomic_read(&osdc->num_homeless) || !list_empty(&slow_osds))
3233                 maybe_request_map(osdc);
3234
3235         while (!list_empty(&slow_osds)) {
3236                 struct ceph_osd *osd = list_first_entry(&slow_osds,
3237                                                         struct ceph_osd,
3238                                                         o_keepalive_item);
3239                 list_del_init(&osd->o_keepalive_item);
3240                 ceph_con_keepalive(&osd->o_con);
3241         }
3242
3243         up_write(&osdc->lock);
3244         schedule_delayed_work(&osdc->timeout_work,
3245                               osdc->client->options->osd_keepalive_timeout);
3246 }
3247
3248 static void handle_osds_timeout(struct work_struct *work)
3249 {
3250         struct ceph_osd_client *osdc =
3251                 container_of(work, struct ceph_osd_client,
3252                              osds_timeout_work.work);
3253         unsigned long delay = osdc->client->options->osd_idle_ttl / 4;
3254         struct ceph_osd *osd, *nosd;
3255
3256         dout("%s osdc %p\n", __func__, osdc);
3257         down_write(&osdc->lock);
3258         list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) {
3259                 if (time_before(jiffies, osd->lru_ttl))
3260                         break;
3261
3262                 WARN_ON(!RB_EMPTY_ROOT(&osd->o_requests));
3263                 WARN_ON(!RB_EMPTY_ROOT(&osd->o_linger_requests));
3264                 close_osd(osd);
3265         }
3266
3267         up_write(&osdc->lock);
3268         schedule_delayed_work(&osdc->osds_timeout_work,
3269                               round_jiffies_relative(delay));
3270 }
3271
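/*
 * Decode a ceph object_locator_t.  Only the pool id is taken; the
 * (long unused) preferred field is skipped, and encodings that carry a
 * locator key, a changed namespace or an explicit hash are rejected
 * with -EINVAL because the kernel client does not support them.
 */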
3272 static int ceph_oloc_decode(void **p, void *end,
3273                             struct ceph_object_locator *oloc)
3274 {
3275         u8 struct_v, struct_cv;
3276         u32 len;
3277         void *struct_end;
3278         int ret = 0;
3279
3280         ceph_decode_need(p, end, 1 + 1 + 4, e_inval);
3281         struct_v = ceph_decode_8(p);
3282         struct_cv = ceph_decode_8(p);
3283         if (struct_v < 3) {
3284                 pr_warn("got v %d < 3 cv %d of ceph_object_locator\n",
3285                         struct_v, struct_cv);
3286                 goto e_inval;
3287         }
3288         if (struct_cv > 6) {
3289                 pr_warn("got v %d cv %d > 6 of ceph_object_locator\n",
3290                         struct_v, struct_cv);
3291                 goto e_inval;
3292         }
3293         len = ceph_decode_32(p);
3294         ceph_decode_need(p, end, len, e_inval);
3295         struct_end = *p + len;
3296
3297         oloc->pool = ceph_decode_64(p);
3298         *p += 4; /* skip preferred */
3299
3300         len = ceph_decode_32(p);
3301         if (len > 0) {
3302                 pr_warn("ceph_object_locator::key is set\n");
3303                 goto e_inval;
3304         }
3305
3306         if (struct_v >= 5) {
3307                 bool changed = false;
3308
3309                 len = ceph_decode_32(p);
3310                 if (len > 0) {
3311                         ceph_decode_need(p, end, len, e_inval);
3312                         if (!oloc->pool_ns ||
3313                             ceph_compare_string(oloc->pool_ns, *p, len))
3314                                 changed = true;
3315                         *p += len;
3316                 } else {
3317                         if (oloc->pool_ns)
3318                                 changed = true;
3319                 }
3320                 if (changed) {
3321                         /* redirect changes namespace */
3322                         pr_warn("ceph_object_locator::nspace is changed\n");
3323                         goto e_inval;
3324                 }
3325         }
3326
3327         if (struct_v >= 6) {
3328                 s64 hash = ceph_decode_64(p);
3329                 if (hash != -1) {
3330                         pr_warn("ceph_object_locator::hash is set\n");
3331                         goto e_inval;
3332                 }
3333         }
3334
3335         /* skip the rest */
3336         *p = struct_end;
3337 out:
3338         return ret;
3339
3340 e_inval:
3341         ret = -EINVAL;
3342         goto out;
3343 }
3344
3345 static int ceph_redirect_decode(void **p, void *end,
3346                                 struct ceph_request_redirect *redir)
3347 {
3348         u8 struct_v, struct_cv;
3349         u32 len;
3350         void *struct_end;
3351         int ret;
3352
3353         ceph_decode_need(p, end, 1 + 1 + 4, e_inval);
3354         struct_v = ceph_decode_8(p);
3355         struct_cv = ceph_decode_8(p);
3356         if (struct_cv > 1) {
3357                 pr_warn("got v %d cv %d > 1 of ceph_request_redirect\n",
3358                         struct_v, struct_cv);
3359                 goto e_inval;
3360         }
3361         len = ceph_decode_32(p);
3362         ceph_decode_need(p, end, len, e_inval);
3363         struct_end = *p + len;
3364
3365         ret = ceph_oloc_decode(p, end, &redir->oloc);
3366         if (ret)
3367                 goto out;
3368
3369         len = ceph_decode_32(p);
3370         if (len > 0) {
3371                 pr_warn("ceph_request_redirect::object_name is set\n");
3372                 goto e_inval;
3373         }
3374
3375         len = ceph_decode_32(p);
3376         *p += len; /* skip osd_instructions */
3377
3378         /* skip the rest */
3379         *p = struct_end;
3380 out:
3381         return ret;
3382
3383 e_inval:
3384         ret = -EINVAL;
3385         goto out;
3386 }
3387
3388 struct MOSDOpReply {
3389         struct ceph_pg pgid;
3390         u64 flags;
3391         int result;
3392         u32 epoch;
3393         int num_ops;
3394         u32 outdata_len[CEPH_OSD_MAX_OPS];
3395         s32 rval[CEPH_OSD_MAX_OPS];
3396         int retry_attempt;
3397         struct ceph_eversion replay_version;
3398         u64 user_version;
3399         struct ceph_request_redirect redirect;
3400 };
3401
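/*
 * Decode an MOSDOpReply into struct MOSDOpReply above.  The message
 * front carries, in order: the oid (skipped), pgid, flags, result, the
 * legacy replay_version, epoch, num_ops and per-op lengths, then
 * retry_attempt and per-op return values, followed by (v5+) the real
 * replay_version/user_version and (v6+, gated by an explicit flag in
 * v7+) an optional redirect.
 */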
3402 static int decode_MOSDOpReply(const struct ceph_msg *msg, struct MOSDOpReply *m)
3403 {
3404         void *p = msg->front.iov_base;
3405         void *const end = p + msg->front.iov_len;
3406         u16 version = le16_to_cpu(msg->hdr.version);
3407         struct ceph_eversion bad_replay_version;
3408         u8 decode_redir;
3409         u32 len;
3410         int ret;
3411         int i;
3412
3413         ceph_decode_32_safe(&p, end, len, e_inval);
3414         ceph_decode_need(&p, end, len, e_inval);
3415         p += len; /* skip oid */
3416
3417         ret = ceph_decode_pgid(&p, end, &m->pgid);
3418         if (ret)
3419                 return ret;
3420
3421         ceph_decode_64_safe(&p, end, m->flags, e_inval);
3422         ceph_decode_32_safe(&p, end, m->result, e_inval);
3423         ceph_decode_need(&p, end, sizeof(bad_replay_version), e_inval);
3424         memcpy(&bad_replay_version, p, sizeof(bad_replay_version));
3425         p += sizeof(bad_replay_version);
3426         ceph_decode_32_safe(&p, end, m->epoch, e_inval);
3427
3428         ceph_decode_32_safe(&p, end, m->num_ops, e_inval);
3429         if (m->num_ops > ARRAY_SIZE(m->outdata_len))
3430                 goto e_inval;
3431
3432         ceph_decode_need(&p, end, m->num_ops * sizeof(struct ceph_osd_op),
3433                          e_inval);
3434         for (i = 0; i < m->num_ops; i++) {
3435                 struct ceph_osd_op *op = p;
3436
3437                 m->outdata_len[i] = le32_to_cpu(op->payload_len);
3438                 p += sizeof(*op);
3439         }
3440
3441         ceph_decode_32_safe(&p, end, m->retry_attempt, e_inval);
3442         for (i = 0; i < m->num_ops; i++)
3443                 ceph_decode_32_safe(&p, end, m->rval[i], e_inval);
3444
3445         if (version >= 5) {
3446                 ceph_decode_need(&p, end, sizeof(m->replay_version), e_inval);
3447                 memcpy(&m->replay_version, p, sizeof(m->replay_version));
3448                 p += sizeof(m->replay_version);
3449                 ceph_decode_64_safe(&p, end, m->user_version, e_inval);
3450         } else {
3451                 m->replay_version = bad_replay_version; /* struct */
3452                 m->user_version = le64_to_cpu(m->replay_version.version);
3453         }
3454
3455         if (version >= 6) {
3456                 if (version >= 7)
3457                         ceph_decode_8_safe(&p, end, decode_redir, e_inval);
3458                 else
3459                         decode_redir = 1;
3460         } else {
3461                 decode_redir = 0;
3462         }
3463
3464         if (decode_redir) {
3465                 ret = ceph_redirect_decode(&p, end, &m->redirect);
3466                 if (ret)
3467                         return ret;
3468         } else {
3469                 ceph_oloc_init(&m->redirect.oloc);
3470         }
3471
3472         return 0;
3473
3474 e_inval:
3475         return -EINVAL;
3476 }
3477
3478 /*
3479  * Handle MOSDOpReply.  Set ->r_result and call the callback if it is
3480  * specified.
3481  */
3482 static void handle_reply(struct ceph_osd *osd, struct ceph_msg *msg)
3483 {
3484         struct ceph_osd_client *osdc = osd->o_osdc;
3485         struct ceph_osd_request *req;
3486         struct MOSDOpReply m;
3487         u64 tid = le64_to_cpu(msg->hdr.tid);
3488         u32 data_len = 0;
3489         int ret;
3490         int i;
3491
3492         dout("%s msg %p tid %llu\n", __func__, msg, tid);
3493
3494         down_read(&osdc->lock);
3495         if (!osd_registered(osd)) {
3496                 dout("%s osd%d unknown\n", __func__, osd->o_osd);
3497                 goto out_unlock_osdc;
3498         }
3499         WARN_ON(osd->o_osd != le64_to_cpu(msg->hdr.src.num));
3500
3501         mutex_lock(&osd->lock);
3502         req = lookup_request(&osd->o_requests, tid);
3503         if (!req) {
3504                 dout("%s osd%d tid %llu unknown\n", __func__, osd->o_osd, tid);
3505                 goto out_unlock_session;
3506         }
3507
3508         m.redirect.oloc.pool_ns = req->r_t.target_oloc.pool_ns;
3509         ret = decode_MOSDOpReply(msg, &m);
3510         m.redirect.oloc.pool_ns = NULL;
3511         if (ret) {
3512                 pr_err("failed to decode MOSDOpReply for tid %llu: %d\n",
3513                        req->r_tid, ret);
3514                 ceph_msg_dump(msg);
3515                 goto fail_request;
3516         }
3517         dout("%s req %p tid %llu flags 0x%llx pgid %llu.%x epoch %u attempt %d v %u'%llu uv %llu\n",
3518              __func__, req, req->r_tid, m.flags, m.pgid.pool, m.pgid.seed,
3519              m.epoch, m.retry_attempt, le32_to_cpu(m.replay_version.epoch),
3520              le64_to_cpu(m.replay_version.version), m.user_version);
3521
3522         if (m.retry_attempt >= 0) {
3523                 if (m.retry_attempt != req->r_attempts - 1) {
3524                         dout("req %p tid %llu retry_attempt %d != %d, ignoring\n",
3525                              req, req->r_tid, m.retry_attempt,
3526                              req->r_attempts - 1);
3527                         goto out_unlock_session;
3528                 }
3529         } else {
3530                 WARN_ON(1); /* MOSDOpReply v4 is assumed */
3531         }
3532
3533         if (!ceph_oloc_empty(&m.redirect.oloc)) {
3534                 dout("req %p tid %llu redirect pool %lld\n", req, req->r_tid,
3535                      m.redirect.oloc.pool);
3536                 unlink_request(osd, req);
3537                 mutex_unlock(&osd->lock);
3538
3539                 /*
3540                  * Not ceph_oloc_copy() - changing pool_ns is not
3541                  * supported.
3542                  */
3543                 req->r_t.target_oloc.pool = m.redirect.oloc.pool;
3544                 req->r_flags |= CEPH_OSD_FLAG_REDIRECTED;
3545                 req->r_tid = 0;
3546                 __submit_request(req, false);
3547                 goto out_unlock_osdc;
3548         }
3549
3550         if (m.num_ops != req->r_num_ops) {
3551                 pr_err("num_ops %d != %d for tid %llu\n", m.num_ops,
3552                        req->r_num_ops, req->r_tid);
3553                 goto fail_request;
3554         }
3555         for (i = 0; i < req->r_num_ops; i++) {
3556                 dout(" req %p tid %llu op %d rval %d len %u\n", req,
3557                      req->r_tid, i, m.rval[i], m.outdata_len[i]);
3558                 req->r_ops[i].rval = m.rval[i];
3559                 req->r_ops[i].outdata_len = m.outdata_len[i];
3560                 data_len += m.outdata_len[i];
3561         }
3562         if (data_len != le32_to_cpu(msg->hdr.data_len)) {
3563                 pr_err("sum of lens %u != %u for tid %llu\n", data_len,
3564                        le32_to_cpu(msg->hdr.data_len), req->r_tid);
3565                 goto fail_request;
3566         }
3567         dout("%s req %p tid %llu result %d data_len %u\n", __func__,
3568              req, req->r_tid, m.result, data_len);
3569
3570         /*
3571          * Since we only ever request ONDISK, we should only ever get
3572          * one (type of) reply back.
3573          */
3574         WARN_ON(!(m.flags & CEPH_OSD_FLAG_ONDISK));
3575         req->r_result = m.result ?: data_len;
3576         finish_request(req);
3577         mutex_unlock(&osd->lock);
3578         up_read(&osdc->lock);
3579
3580         __complete_request(req);
3581         return;
3582
3583 fail_request:
3584         complete_request(req, -EIO);
3585 out_unlock_session:
3586         mutex_unlock(&osd->lock);
3587 out_unlock_osdc:
3588         up_read(&osdc->lock);
3589 }
3590
3591 static void set_pool_was_full(struct ceph_osd_client *osdc)
3592 {
3593         struct rb_node *n;
3594
3595         for (n = rb_first(&osdc->osdmap->pg_pools); n; n = rb_next(n)) {
3596                 struct ceph_pg_pool_info *pi =
3597                     rb_entry(n, struct ceph_pg_pool_info, node);
3598
3599                 pi->was_full = __pool_full(pi);
3600         }
3601 }
3602
3603 static bool pool_cleared_full(struct ceph_osd_client *osdc, s64 pool_id)
3604 {
3605         struct ceph_pg_pool_info *pi;
3606
3607         pi = ceph_pg_pool_by_id(osdc->osdmap, pool_id);
3608         if (!pi)
3609                 return false;
3610
3611         return pi->was_full && !__pool_full(pi);
3612 }
3613
3614 static enum calc_target_result
3615 recalc_linger_target(struct ceph_osd_linger_request *lreq)
3616 {
3617         struct ceph_osd_client *osdc = lreq->osdc;
3618         enum calc_target_result ct_res;
3619
3620         ct_res = calc_target(osdc, &lreq->t, NULL, true);
3621         if (ct_res == CALC_TARGET_NEED_RESEND) {
3622                 struct ceph_osd *osd;
3623
3624                 osd = lookup_create_osd(osdc, lreq->t.osd, true);
3625                 if (osd != lreq->osd) {
3626                         unlink_linger(lreq->osd, lreq);
3627                         link_linger(osd, lreq);
3628                 }
3629         }
3630
3631         return ct_res;
3632 }
3633
3634 /*
3635  * Requeue requests whose mapping to an OSD has changed.
3636  */
3637 static void scan_requests(struct ceph_osd *osd,
3638                           bool force_resend,
3639                           bool cleared_full,
3640                           bool check_pool_cleared_full,
3641                           struct rb_root *need_resend,
3642                           struct list_head *need_resend_linger)
3643 {
3644         struct ceph_osd_client *osdc = osd->o_osdc;
3645         struct rb_node *n;
3646         bool force_resend_writes;
3647
3648         for (n = rb_first(&osd->o_linger_requests); n; ) {
3649                 struct ceph_osd_linger_request *lreq =
3650                     rb_entry(n, struct ceph_osd_linger_request, node);
3651                 enum calc_target_result ct_res;
3652
3653                 n = rb_next(n); /* recalc_linger_target() */
3654
3655                 dout("%s lreq %p linger_id %llu\n", __func__, lreq,
3656                      lreq->linger_id);
3657                 ct_res = recalc_linger_target(lreq);
3658                 switch (ct_res) {
3659                 case CALC_TARGET_NO_ACTION:
3660                         force_resend_writes = cleared_full ||
3661                             (check_pool_cleared_full &&
3662                              pool_cleared_full(osdc, lreq->t.base_oloc.pool));
3663                         if (!force_resend && !force_resend_writes)
3664                                 break;
3665
3666                         /* fall through */
3667                 case CALC_TARGET_NEED_RESEND:
3668                         cancel_linger_map_check(lreq);
3669                         /*
3670                          * scan_requests() for the previous epoch(s)
3671                          * may have already added it to the list, since
3672                          * it's not unlinked here.
3673                          */
3674                         if (list_empty(&lreq->scan_item))
3675                                 list_add_tail(&lreq->scan_item, need_resend_linger);
3676                         break;
3677                 case CALC_TARGET_POOL_DNE:
3678                         list_del_init(&lreq->scan_item);
3679                         check_linger_pool_dne(lreq);
3680                         break;
3681                 }
3682         }
3683
3684         for (n = rb_first(&osd->o_requests); n; ) {
3685                 struct ceph_osd_request *req =
3686                     rb_entry(n, struct ceph_osd_request, r_node);
3687                 enum calc_target_result ct_res;
3688
3689                 n = rb_next(n); /* unlink_request(), check_pool_dne() */
3690
3691                 dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
3692                 ct_res = calc_target(osdc, &req->r_t, &req->r_osd->o_con,
3693                                      false);
3694                 switch (ct_res) {
3695                 case CALC_TARGET_NO_ACTION:
3696                         force_resend_writes = cleared_full ||
3697                             (check_pool_cleared_full &&
3698                              pool_cleared_full(osdc, req->r_t.base_oloc.pool));
3699                         if (!force_resend &&
3700                             (!(req->r_flags & CEPH_OSD_FLAG_WRITE) ||
3701                              !force_resend_writes))
3702                                 break;
3703
3704                         /* fall through */
3705                 case CALC_TARGET_NEED_RESEND:
3706                         cancel_map_check(req);
3707                         unlink_request(osd, req);
3708                         insert_request(need_resend, req);
3709                         break;
3710                 case CALC_TARGET_POOL_DNE:
3711                         check_pool_dne(req);
3712                         break;
3713                 }
3714         }
3715 }
3716
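/*
 * Apply a single incremental or full osdmap.  Per-pool was_full state
 * is carried over from the old map, requests are rescanned (homeless
 * ones included) so that anything whose mapping changed lands on
 * @need_resend / @need_resend_linger, and sessions to OSDs that went
 * down or changed address are closed.  If one or more map epochs were
 * skipped, everything is force-resent.
 */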
3717 static int handle_one_map(struct ceph_osd_client *osdc,
3718                           void *p, void *end, bool incremental,
3719                           struct rb_root *need_resend,
3720                           struct list_head *need_resend_linger)
3721 {
3722         struct ceph_osdmap *newmap;
3723         struct rb_node *n;
3724         bool skipped_map = false;
3725         bool was_full;
3726
3727         was_full = ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL);
3728         set_pool_was_full(osdc);
3729
3730         if (incremental)
3731                 newmap = osdmap_apply_incremental(&p, end, osdc->osdmap);
3732         else
3733                 newmap = ceph_osdmap_decode(&p, end);
3734         if (IS_ERR(newmap))
3735                 return PTR_ERR(newmap);
3736
3737         if (newmap != osdc->osdmap) {
3738                 /*
3739                  * Preserve ->was_full before destroying the old map.
3740                  * For pools that weren't in the old map, ->was_full
3741                  * should be false.
3742                  */
3743                 for (n = rb_first(&newmap->pg_pools); n; n = rb_next(n)) {
3744                         struct ceph_pg_pool_info *pi =
3745                             rb_entry(n, struct ceph_pg_pool_info, node);
3746                         struct ceph_pg_pool_info *old_pi;
3747
3748                         old_pi = ceph_pg_pool_by_id(osdc->osdmap, pi->id);
3749                         if (old_pi)
3750                                 pi->was_full = old_pi->was_full;
3751                         else
3752                                 WARN_ON(pi->was_full);
3753                 }
3754
3755                 if (osdc->osdmap->epoch &&
3756                     osdc->osdmap->epoch + 1 < newmap->epoch) {
3757                         WARN_ON(incremental);
3758                         skipped_map = true;
3759                 }
3760
3761                 ceph_osdmap_destroy(osdc->osdmap);
3762                 osdc->osdmap = newmap;
3763         }
3764
3765         was_full &= !ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL);
3766         scan_requests(&osdc->homeless_osd, skipped_map, was_full, true,
3767                       need_resend, need_resend_linger);
3768
3769         for (n = rb_first(&osdc->osds); n; ) {
3770                 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
3771
3772                 n = rb_next(n); /* close_osd() */
3773
3774                 scan_requests(osd, skipped_map, was_full, true, need_resend,
3775                               need_resend_linger);
3776                 if (!ceph_osd_is_up(osdc->osdmap, osd->o_osd) ||
3777                     memcmp(&osd->o_con.peer_addr,
3778                            ceph_osd_addr(osdc->osdmap, osd->o_osd),
3779                            sizeof(struct ceph_entity_addr)))
3780                         close_osd(osd);
3781         }
3782
3783         return 0;
3784 }
3785
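/*
 * Resend everything collected by scan_requests().  Targets are first
 * recalculated against the latest map (dropping requests whose pool no
 * longer exists), then each request is linked to its new OSD session
 * and sent; linger requests are re-registered via send_linger().
 */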
3786 static void kick_requests(struct ceph_osd_client *osdc,
3787                           struct rb_root *need_resend,
3788                           struct list_head *need_resend_linger)
3789 {
3790         struct ceph_osd_linger_request *lreq, *nlreq;
3791         enum calc_target_result ct_res;
3792         struct rb_node *n;
3793
3794         /* make sure need_resend targets reflect latest map */
3795         for (n = rb_first(need_resend); n; ) {
3796                 struct ceph_osd_request *req =
3797                     rb_entry(n, struct ceph_osd_request, r_node);
3798
3799                 n = rb_next(n);
3800
3801                 if (req->r_t.epoch < osdc->osdmap->epoch) {
3802                         ct_res = calc_target(osdc, &req->r_t, NULL, false);
3803                         if (ct_res == CALC_TARGET_POOL_DNE) {
3804                                 erase_request(need_resend, req);
3805                                 check_pool_dne(req);
3806                         }
3807                 }
3808         }
3809
3810         for (n = rb_first(need_resend); n; ) {
3811                 struct ceph_osd_request *req =
3812                     rb_entry(n, struct ceph_osd_request, r_node);
3813                 struct ceph_osd *osd;
3814
3815                 n = rb_next(n);
3816                 erase_request(need_resend, req); /* before link_request() */
3817
3818                 osd = lookup_create_osd(osdc, req->r_t.osd, true);
3819                 link_request(osd, req);
3820                 if (!req->r_linger) {
3821                         if (!osd_homeless(osd) && !req->r_t.paused)
3822                                 send_request(req);
3823                 } else {
3824                         cancel_linger_request(req);
3825                 }
3826         }
3827
3828         list_for_each_entry_safe(lreq, nlreq, need_resend_linger, scan_item) {
3829                 if (!osd_homeless(lreq->osd))
3830                         send_linger(lreq);
3831
3832                 list_del_init(&lreq->scan_item);
3833         }
3834 }
3835
3836 /*
3837  * Process updated osd map.
3838  *
3839  * The message contains any number of incremental and full maps, normally
3840  * indicating some sort of topology change in the cluster.  Kick requests
3841  * off to different OSDs as needed.
3842  */
3843 void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
3844 {
3845         void *p = msg->front.iov_base;
3846         void *const end = p + msg->front.iov_len;
3847         u32 nr_maps, maplen;
3848         u32 epoch;
3849         struct ceph_fsid fsid;
3850         struct rb_root need_resend = RB_ROOT;
3851         LIST_HEAD(need_resend_linger);
3852         bool handled_incremental = false;
3853         bool was_pauserd, was_pausewr;
3854         bool pauserd, pausewr;
3855         int err;
3856
3857         dout("%s have %u\n", __func__, osdc->osdmap->epoch);
3858         down_write(&osdc->lock);
3859
3860         /* verify fsid */
3861         ceph_decode_need(&p, end, sizeof(fsid), bad);
3862         ceph_decode_copy(&p, &fsid, sizeof(fsid));
3863         if (ceph_check_fsid(osdc->client, &fsid) < 0)
3864                 goto bad;
3865
3866         was_pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
3867         was_pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
3868                       ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
3869                       have_pool_full(osdc);
3870
3871         /* incremental maps */
3872         ceph_decode_32_safe(&p, end, nr_maps, bad);
3873         dout(" %d inc maps\n", nr_maps);
3874         while (nr_maps > 0) {
3875                 ceph_decode_need(&p, end, 2*sizeof(u32), bad);
3876                 epoch = ceph_decode_32(&p);
3877                 maplen = ceph_decode_32(&p);
3878                 ceph_decode_need(&p, end, maplen, bad);
3879                 if (osdc->osdmap->epoch &&
3880                     osdc->osdmap->epoch + 1 == epoch) {
3881                         dout("applying incremental map %u len %d\n",
3882                              epoch, maplen);
3883                         err = handle_one_map(osdc, p, p + maplen, true,
3884                                              &need_resend, &need_resend_linger);
3885                         if (err)
3886                                 goto bad;
3887                         handled_incremental = true;
3888                 } else {
3889                         dout("ignoring incremental map %u len %d\n",
3890                              epoch, maplen);
3891                 }
3892                 p += maplen;
3893                 nr_maps--;
3894         }
3895         if (handled_incremental)
3896                 goto done;
3897
3898         /* full maps */
3899         ceph_decode_32_safe(&p, end, nr_maps, bad);
3900         dout(" %d full maps\n", nr_maps);
3901         while (nr_maps) {
3902                 ceph_decode_need(&p, end, 2*sizeof(u32), bad);
3903                 epoch = ceph_decode_32(&p);
3904                 maplen = ceph_decode_32(&p);
3905                 ceph_decode_need(&p, end, maplen, bad);
3906                 if (nr_maps > 1) {
3907                         dout("skipping non-latest full map %u len %d\n",
3908                              epoch, maplen);
3909                 } else if (osdc->osdmap->epoch >= epoch) {
3910                         dout("skipping full map %u len %d, "
3911                              "older than our %u\n", epoch, maplen,
3912                              osdc->osdmap->epoch);
3913                 } else {
3914                         dout("taking full map %u len %d\n", epoch, maplen);
3915                         err = handle_one_map(osdc, p, p + maplen, false,
3916                                              &need_resend, &need_resend_linger);
3917                         if (err)
3918                                 goto bad;
3919                 }
3920                 p += maplen;
3921                 nr_maps--;
3922         }
3923
3924 done:
3925         /*
3926          * Subscribe to subsequent osdmap updates while the cluster is
3927          * full, so that we find out when we are no longer full and can
3928          * stop returning ENOSPC.
3929          */
3930         pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
3931         pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
3932                   ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
3933                   have_pool_full(osdc);
3934         if (was_pauserd || was_pausewr || pauserd || pausewr ||
3935             osdc->osdmap->epoch < osdc->epoch_barrier)
3936                 maybe_request_map(osdc);
3937
3938         kick_requests(osdc, &need_resend, &need_resend_linger);
3939
3940         ceph_osdc_abort_on_full(osdc);
3941         ceph_monc_got_map(&osdc->client->monc, CEPH_SUB_OSDMAP,
3942                           osdc->osdmap->epoch);
3943         up_write(&osdc->lock);
3944         wake_up_all(&osdc->client->auth_wq);
3945         return;
3946
3947 bad:
3948         pr_err("osdc handle_map corrupt msg\n");
3949         ceph_msg_dump(msg);
3950         up_write(&osdc->lock);
3951 }
3952
3953 /*
3954  * Resubmit requests pending on the given osd.
3955  */
3956 static void kick_osd_requests(struct ceph_osd *osd)
3957 {
3958         struct rb_node *n;
3959
3960         clear_backoffs(osd);
3961
3962         for (n = rb_first(&osd->o_requests); n; ) {
3963                 struct ceph_osd_request *req =
3964                     rb_entry(n, struct ceph_osd_request, r_node);
3965
3966                 n = rb_next(n); /* cancel_linger_request() */
3967
3968                 if (!req->r_linger) {
3969                         if (!req->r_t.paused)
3970                                 send_request(req);
3971                 } else {
3972                         cancel_linger_request(req);
3973                 }
3974         }
3975         for (n = rb_first(&osd->o_linger_requests); n; n = rb_next(n)) {
3976                 struct ceph_osd_linger_request *lreq =
3977                     rb_entry(n, struct ceph_osd_linger_request, node);
3978
3979                 send_linger(lreq);
3980         }
3981 }
3982
3983 /*
3984  * If the osd connection drops, we need to resubmit all requests.
3985  */
3986 static void osd_fault(struct ceph_connection *con)
3987 {
3988         struct ceph_osd *osd = con->private;
3989         struct ceph_osd_client *osdc = osd->o_osdc;
3990
3991         dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
3992
3993         down_write(&osdc->lock);
3994         if (!osd_registered(osd)) {
3995                 dout("%s osd%d unknown\n", __func__, osd->o_osd);
3996                 goto out_unlock;
3997         }
3998
3999         if (!reopen_osd(osd))
4000                 kick_osd_requests(osd);
4001         maybe_request_map(osdc);
4002
4003 out_unlock:
4004         up_write(&osdc->lock);
4005 }
4006
4007 struct MOSDBackoff {
4008         struct ceph_spg spgid;
4009         u32 map_epoch;
4010         u8 op;
4011         u64 id;
4012         struct ceph_hobject_id *begin;
4013         struct ceph_hobject_id *end;
4014 };
4015
4016 static int decode_MOSDBackoff(const struct ceph_msg *msg, struct MOSDBackoff *m)
4017 {
4018         void *p = msg->front.iov_base;
4019         void *const end = p + msg->front.iov_len;
4020         u8 struct_v;
4021         u32 struct_len;
4022         int ret;
4023
4024         ret = ceph_start_decoding(&p, end, 1, "spg_t", &struct_v, &struct_len);
4025         if (ret)
4026                 return ret;
4027
4028         ret = ceph_decode_pgid(&p, end, &m->spgid.pgid);
4029         if (ret)
4030                 return ret;
4031
4032         ceph_decode_8_safe(&p, end, m->spgid.shard, e_inval);
4033         ceph_decode_32_safe(&p, end, m->map_epoch, e_inval);
4034         ceph_decode_8_safe(&p, end, m->op, e_inval);
4035         ceph_decode_64_safe(&p, end, m->id, e_inval);
4036
4037         m->begin = kzalloc(sizeof(*m->begin), GFP_NOIO);
4038         if (!m->begin)
4039                 return -ENOMEM;
4040
4041         ret = decode_hoid(&p, end, m->begin);
4042         if (ret) {
4043                 free_hoid(m->begin);
4044                 return ret;
4045         }
4046
4047         m->end = kzalloc(sizeof(*m->end), GFP_NOIO);
4048         if (!m->end) {
4049                 free_hoid(m->begin);
4050                 return -ENOMEM;
4051         }
4052
4053         ret = decode_hoid(&p, end, m->end);
4054         if (ret) {
4055                 free_hoid(m->begin);
4056                 free_hoid(m->end);
4057                 return ret;
4058         }
4059
4060         return 0;
4061
4062 e_inval:
4063         return -EINVAL;
4064 }
4065
4066 static struct ceph_msg *create_backoff_message(
4067                                 const struct ceph_osd_backoff *backoff,
4068                                 u32 map_epoch)
4069 {
4070         struct ceph_msg *msg;
4071         void *p, *end;
4072         int msg_size;
4073
4074         msg_size = CEPH_ENCODING_START_BLK_LEN +
4075                         CEPH_PGID_ENCODING_LEN + 1; /* spgid */
4076         msg_size += 4 + 1 + 8; /* map_epoch, op, id */
4077         msg_size += CEPH_ENCODING_START_BLK_LEN +
4078                         hoid_encoding_size(backoff->begin);
4079         msg_size += CEPH_ENCODING_START_BLK_LEN +
4080                         hoid_encoding_size(backoff->end);
4081
4082         msg = ceph_msg_new(CEPH_MSG_OSD_BACKOFF, msg_size, GFP_NOIO, true);
4083         if (!msg)
4084                 return NULL;
4085
4086         p = msg->front.iov_base;
4087         end = p + msg->front_alloc_len;
4088
4089         encode_spgid(&p, &backoff->spgid);
4090         ceph_encode_32(&p, map_epoch);
4091         ceph_encode_8(&p, CEPH_OSD_BACKOFF_OP_ACK_BLOCK);
4092         ceph_encode_64(&p, backoff->id);
4093         encode_hoid(&p, end, backoff->begin);
4094         encode_hoid(&p, end, backoff->end);
4095         BUG_ON(p != end);
4096
4097         msg->front.iov_len = p - msg->front.iov_base;
4098         msg->hdr.version = cpu_to_le16(1); /* MOSDBackoff v1 */
4099         msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
4100
4101         return msg;
4102 }
4103
4104 static void handle_backoff_block(struct ceph_osd *osd, struct MOSDBackoff *m)
4105 {
4106         struct ceph_spg_mapping *spg;
4107         struct ceph_osd_backoff *backoff;
4108         struct ceph_msg *msg;
4109
4110         dout("%s osd%d spgid %llu.%xs%d id %llu\n", __func__, osd->o_osd,
4111              m->spgid.pgid.pool, m->spgid.pgid.seed, m->spgid.shard, m->id);
4112
4113         spg = lookup_spg_mapping(&osd->o_backoff_mappings, &m->spgid);
4114         if (!spg) {
4115                 spg = alloc_spg_mapping();
4116                 if (!spg) {
4117                         pr_err("%s failed to allocate spg\n", __func__);
4118                         return;
4119                 }
4120                 spg->spgid = m->spgid; /* struct */
4121                 insert_spg_mapping(&osd->o_backoff_mappings, spg);
4122         }
4123
4124         backoff = alloc_backoff();
4125         if (!backoff) {
4126                 pr_err("%s failed to allocate backoff\n", __func__);
4127                 return;
4128         }
4129         backoff->spgid = m->spgid; /* struct */
4130         backoff->id = m->id;
4131         backoff->begin = m->begin;
4132         m->begin = NULL; /* backoff now owns this */
4133         backoff->end = m->end;
4134         m->end = NULL;   /* ditto */
4135
4136         insert_backoff(&spg->backoffs, backoff);
4137         insert_backoff_by_id(&osd->o_backoffs_by_id, backoff);
4138
4139         /*
4140          * Ack with original backoff's epoch so that the OSD can
4141          * discard this if there was a PG split.
4142          */
4143         msg = create_backoff_message(backoff, m->map_epoch);
4144         if (!msg) {
4145                 pr_err("%s failed to allocate msg\n", __func__);
4146                 return;
4147         }
4148         ceph_con_send(&osd->o_con, msg);
4149 }
4150
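/*
 * Return true if the request target falls within the half-open backoff
 * range [begin, end).
 */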
4151 static bool target_contained_by(const struct ceph_osd_request_target *t,
4152                                 const struct ceph_hobject_id *begin,
4153                                 const struct ceph_hobject_id *end)
4154 {
4155         struct ceph_hobject_id hoid;
4156         int cmp;
4157
4158         hoid_fill_from_target(&hoid, t);
4159         cmp = hoid_compare(&hoid, begin);
4160         return !cmp || (cmp > 0 && hoid_compare(&hoid, end) < 0);
4161 }
4162
4163 static void handle_backoff_unblock(struct ceph_osd *osd,
4164                                    const struct MOSDBackoff *m)
4165 {
4166         struct ceph_spg_mapping *spg;
4167         struct ceph_osd_backoff *backoff;
4168         struct rb_node *n;
4169
4170         dout("%s osd%d spgid %llu.%xs%d id %llu\n", __func__, osd->o_osd,
4171              m->spgid.pgid.pool, m->spgid.pgid.seed, m->spgid.shard, m->id);
4172
4173         backoff = lookup_backoff_by_id(&osd->o_backoffs_by_id, m->id);
4174         if (!backoff) {
4175                 pr_err("%s osd%d spgid %llu.%xs%d id %llu backoff dne\n",
4176                        __func__, osd->o_osd, m->spgid.pgid.pool,
4177                        m->spgid.pgid.seed, m->spgid.shard, m->id);
4178                 return;
4179         }
4180
4181         if (hoid_compare(backoff->begin, m->begin) &&
4182             hoid_compare(backoff->end, m->end)) {
4183                 pr_err("%s osd%d spgid %llu.%xs%d id %llu bad range?\n",
4184                        __func__, osd->o_osd, m->spgid.pgid.pool,
4185                        m->spgid.pgid.seed, m->spgid.shard, m->id);
4186                 /* unblock it anyway... */
4187         }
4188
4189         spg = lookup_spg_mapping(&osd->o_backoff_mappings, &backoff->spgid);
4190         BUG_ON(!spg);
4191
4192         erase_backoff(&spg->backoffs, backoff);
4193         erase_backoff_by_id(&osd->o_backoffs_by_id, backoff);
4194         free_backoff(backoff);
4195
4196         if (RB_EMPTY_ROOT(&spg->backoffs)) {
4197                 erase_spg_mapping(&osd->o_backoff_mappings, spg);
4198                 free_spg_mapping(spg);
4199         }
4200
4201         for (n = rb_first(&osd->o_requests); n; n = rb_next(n)) {
4202                 struct ceph_osd_request *req =
4203                     rb_entry(n, struct ceph_osd_request, r_node);
4204
4205                 if (!ceph_spg_compare(&req->r_t.spgid, &m->spgid)) {
4206                         /*
4207                          * Match against @m, not @backoff -- the PG may
4208                          * have split on the OSD.
4209                          */
4210                         if (target_contained_by(&req->r_t, m->begin, m->end)) {
4211                                 /*
4212                                  * If no other installed backoff applies,
4213                                  * resend.
4214                                  */
4215                                 send_request(req);
4216                         }
4217                 }
4218         }
4219 }
4220
4221 static void handle_backoff(struct ceph_osd *osd, struct ceph_msg *msg)
4222 {
4223         struct ceph_osd_client *osdc = osd->o_osdc;
4224         struct MOSDBackoff m;
4225         int ret;
4226
4227         down_read(&osdc->lock);
4228         if (!osd_registered(osd)) {
4229                 dout("%s osd%d unknown\n", __func__, osd->o_osd);
4230                 up_read(&osdc->lock);
4231                 return;
4232         }
4233         WARN_ON(osd->o_osd != le64_to_cpu(msg->hdr.src.num));
4234
4235         mutex_lock(&osd->lock);
4236         ret = decode_MOSDBackoff(msg, &m);
4237         if (ret) {
4238                 pr_err("failed to decode MOSDBackoff: %d\n", ret);
4239                 ceph_msg_dump(msg);
4240                 goto out_unlock;
4241         }
4242
4243         switch (m.op) {
4244         case CEPH_OSD_BACKOFF_OP_BLOCK:
4245                 handle_backoff_block(osd, &m);
4246                 break;
4247         case CEPH_OSD_BACKOFF_OP_UNBLOCK:
4248                 handle_backoff_unblock(osd, &m);
4249                 break;
4250         default:
4251                 pr_err("%s osd%d unknown op %d\n", __func__, osd->o_osd, m.op);
4252         }
4253
4254         free_hoid(m.begin);
4255         free_hoid(m.end);
4256
4257 out_unlock:
4258         mutex_unlock(&osd->lock);
4259         up_read(&osdc->lock);
4260 }
4261
4262 /*
4263  * Process osd watch notifications
4264  */
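/*
 * Three cases are handled here: CEPH_WATCH_EVENT_DISCONNECT queues a
 * -ENOTCONN error for the watcher, CEPH_WATCH_EVENT_NOTIFY_COMPLETE
 * completes a pending notify (handing back the reply pages if the
 * caller asked for them), and CEPH_WATCH_EVENT_NOTIFY queues a work
 * item that invokes the registered watch callback.
 */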
4265 static void handle_watch_notify(struct ceph_osd_client *osdc,
4266                                 struct ceph_msg *msg)
4267 {
4268         void *p = msg->front.iov_base;
4269         void *const end = p + msg->front.iov_len;
4270         struct ceph_osd_linger_request *lreq;
4271         struct linger_work *lwork;
4272         u8 proto_ver, opcode;
4273         u64 cookie, notify_id;
4274         u64 notifier_id = 0;
4275         s32 return_code = 0;
4276         void *payload = NULL;
4277         u32 payload_len = 0;
4278
4279         ceph_decode_8_safe(&p, end, proto_ver, bad);
4280         ceph_decode_8_safe(&p, end, opcode, bad);
4281         ceph_decode_64_safe(&p, end, cookie, bad);
4282         p += 8; /* skip ver */
4283         ceph_decode_64_safe(&p, end, notify_id, bad);
4284
4285         if (proto_ver >= 1) {
4286                 ceph_decode_32_safe(&p, end, payload_len, bad);
4287                 ceph_decode_need(&p, end, payload_len, bad);
4288                 payload = p;
4289                 p += payload_len;
4290         }
4291
4292         if (le16_to_cpu(msg->hdr.version) >= 2)
4293                 ceph_decode_32_safe(&p, end, return_code, bad);
4294
4295         if (le16_to_cpu(msg->hdr.version) >= 3)
4296                 ceph_decode_64_safe(&p, end, notifier_id, bad);
4297
4298         down_read(&osdc->lock);
4299         lreq = lookup_linger_osdc(&osdc->linger_requests, cookie);
4300         if (!lreq) {
4301                 dout("%s opcode %d cookie %llu dne\n", __func__, opcode,
4302                      cookie);
4303                 goto out_unlock_osdc;
4304         }
4305
4306         mutex_lock(&lreq->lock);
4307         dout("%s opcode %d cookie %llu lreq %p is_watch %d\n", __func__,
4308              opcode, cookie, lreq, lreq->is_watch);
4309         if (opcode == CEPH_WATCH_EVENT_DISCONNECT) {
4310                 if (!lreq->last_error) {
4311                         lreq->last_error = -ENOTCONN;
4312                         queue_watch_error(lreq);
4313                 }
4314         } else if (!lreq->is_watch) {
4315                 /* CEPH_WATCH_EVENT_NOTIFY_COMPLETE */
4316                 if (lreq->notify_id && lreq->notify_id != notify_id) {
4317                         dout("lreq %p notify_id %llu != %llu, ignoring\n", lreq,
4318                              lreq->notify_id, notify_id);
4319                 } else if (!completion_done(&lreq->notify_finish_wait)) {
4320                         struct ceph_msg_data *data =
4321                             list_first_entry_or_null(&msg->data,
4322                                                      struct ceph_msg_data,
4323                                                      links);
4324
4325                         if (data) {
4326                                 if (lreq->preply_pages) {
4327                                         WARN_ON(data->type !=
4328                                                         CEPH_MSG_DATA_PAGES);
4329                                         *lreq->preply_pages = data->pages;
4330                                         *lreq->preply_len = data->length;
4331                                 } else {
4332                                         ceph_release_page_vector(data->pages,
4333                                                calc_pages_for(0, data->length));
4334                                 }
4335                         }
4336                         lreq->notify_finish_error = return_code;
4337                         complete_all(&lreq->notify_finish_wait);
4338                 }
4339         } else {
4340                 /* CEPH_WATCH_EVENT_NOTIFY */
4341                 lwork = lwork_alloc(lreq, do_watch_notify);
4342                 if (!lwork) {
4343                         pr_err("failed to allocate notify-lwork\n");
4344                         goto out_unlock_lreq;
4345                 }
4346
4347                 lwork->notify.notify_id = notify_id;
4348                 lwork->notify.notifier_id = notifier_id;
4349                 lwork->notify.payload = payload;
4350                 lwork->notify.payload_len = payload_len;
4351                 lwork->notify.msg = ceph_msg_get(msg);
4352                 lwork_queue(lwork);
4353         }
4354
4355 out_unlock_lreq:
4356         mutex_unlock(&lreq->lock);
4357 out_unlock_osdc:
4358         up_read(&osdc->lock);
4359         return;
4360
4361 bad:
4362         pr_err("osdc handle_watch_notify corrupt msg\n");
4363 }
4364
4365 /*
4366  * Register request, send initial attempt.
4367  */
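/*
 * Typical caller flow (a rough sketch, not code from this file; real
 * users such as rbd and cephfs usually go through helpers like
 * ceph_osdc_new_request() to set up the oid, oloc and ops):
 *
 *	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
 *	if (!req)
 *		return -ENOMEM;
 *	... fill in req->r_base_oid, req->r_base_oloc and req->r_flags,
 *	... set up req->r_ops[0] and any data items, then:
 *	ceph_osdc_start_request(osdc, req, false);
 *	ret = ceph_osdc_wait_request(osdc, req);
 *	ceph_osdc_put_request(req);
 */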
4368 int ceph_osdc_start_request(struct ceph_osd_client *osdc,
4369                             struct ceph_osd_request *req,
4370                             bool nofail)
4371 {
4372         down_read(&osdc->lock);
4373         submit_request(req, false);
4374         up_read(&osdc->lock);
4375
4376         return 0;
4377 }
4378 EXPORT_SYMBOL(ceph_osdc_start_request);
4379
4380 /*
4381  * Unregister a registered request.  The request is not completed:
4382  * ->r_result isn't set and __complete_request() isn't called.
4383  */
4384 void ceph_osdc_cancel_request(struct ceph_osd_request *req)
4385 {
4386         struct ceph_osd_client *osdc = req->r_osdc;
4387
4388         down_write(&osdc->lock);
4389         if (req->r_osd)
4390                 cancel_request(req);
4391         up_write(&osdc->lock);
4392 }
4393 EXPORT_SYMBOL(ceph_osdc_cancel_request);
4394
4395 /*
4396  * @timeout: in jiffies, 0 means "wait forever"
4397  */
4398 static int wait_request_timeout(struct ceph_osd_request *req,
4399                                 unsigned long timeout)
4400 {
4401         long left;
4402
4403         dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
4404         left = wait_for_completion_killable_timeout(&req->r_completion,
4405                                                 ceph_timeout_jiffies(timeout));
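        /* timed out or got a fatal signal - give up and unregister the request */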
4406         if (left <= 0) {
4407                 left = left ?: -ETIMEDOUT;
4408                 ceph_osdc_cancel_request(req);
4409         } else {
4410                 left = req->r_result; /* completed */
4411         }
4412
4413         return left;
4414 }
4415
4416 /*
4417  * wait for a request to complete
4418  */
4419 int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
4420                            struct ceph_osd_request *req)
4421 {
4422         return wait_request_timeout(req, 0);
4423 }
4424 EXPORT_SYMBOL(ceph_osdc_wait_request);
4425
4426 /*
4427  * sync - wait for all in-flight requests to flush.  avoid starvation.
4428  */
4429 void ceph_osdc_sync(struct ceph_osd_client *osdc)
4430 {
4431         struct rb_node *n, *p;
4432         u64 last_tid = atomic64_read(&osdc->last_tid);
4433
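        /*
         * Walk every OSD session's request tree; each time we block on a
         * write we drop the locks and rescan from the top.  Only requests
         * with tids at or below the snapshot taken above are waited on,
         * so newly submitted writes cannot starve us.
         */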
4434 again:
4435         down_read(&osdc->lock);
4436         for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
4437                 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
4438
4439                 mutex_lock(&osd->lock);
4440                 for (p = rb_first(&osd->o_requests); p; p = rb_next(p)) {
4441                         struct ceph_osd_request *req =
4442                             rb_entry(p, struct ceph_osd_request, r_node);
4443
4444                         if (req->r_tid > last_tid)
4445                                 break;
4446
4447                         if (!(req->r_flags & CEPH_OSD_FLAG_WRITE))
4448                                 continue;
4449
4450                         ceph_osdc_get_request(req);
4451                         mutex_unlock(&osd->lock);
4452                         up_read(&osdc->lock);
4453                         dout("%s waiting on req %p tid %llu last_tid %llu\n",
4454                              __func__, req, req->r_tid, last_tid);
4455                         wait_for_completion(&req->r_completion);
4456                         ceph_osdc_put_request(req);
4457                         goto again;
4458                 }
4459
4460                 mutex_unlock(&osd->lock);
4461         }
4462
4463         up_read(&osdc->lock);
4464         dout("%s done last_tid %llu\n", __func__, last_tid);
4465 }
4466 EXPORT_SYMBOL(ceph_osdc_sync);
4467
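/*
 * Allocate a bare single-op request aimed at the linger request's
 * target object.  The actual op (watch, ping or notify) is filled in
 * by the caller once a linger_id has been assigned.
 */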
4468 static struct ceph_osd_request *
4469 alloc_linger_request(struct ceph_osd_linger_request *lreq)
4470 {
4471         struct ceph_osd_request *req;
4472
4473         req = ceph_osdc_alloc_request(lreq->osdc, NULL, 1, false, GFP_NOIO);
4474         if (!req)
4475                 return NULL;
4476
4477         ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
4478         ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
4479
4480         if (ceph_osdc_alloc_messages(req, GFP_NOIO)) {
4481                 ceph_osdc_put_request(req);
4482                 return NULL;
4483         }
4484
4485         return req;
4486 }
4487
4488 /*
4489  * Returns a handle, caller owns a ref.
4490  */
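/*
 * Illustrative sketch of a caller (my_wcb, my_errcb and priv are
 * caller-supplied, not part of this file):
 *
 *     handle = ceph_osdc_watch(osdc, oid, oloc, my_wcb, my_errcb, priv);
 *     if (IS_ERR(handle))
 *             return PTR_ERR(handle);
 *     ...
 *     ceph_osdc_unwatch(osdc, handle);    (releases the ref)
 */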
4491 struct ceph_osd_linger_request *
4492 ceph_osdc_watch(struct ceph_osd_client *osdc,
4493                 struct ceph_object_id *oid,
4494                 struct ceph_object_locator *oloc,
4495                 rados_watchcb2_t wcb,
4496                 rados_watcherrcb_t errcb,
4497                 void *data)
4498 {
4499         struct ceph_osd_linger_request *lreq;
4500         int ret;
4501
4502         lreq = linger_alloc(osdc);
4503         if (!lreq)
4504                 return ERR_PTR(-ENOMEM);
4505
4506         lreq->is_watch = true;
4507         lreq->wcb = wcb;
4508         lreq->errcb = errcb;
4509         lreq->data = data;
4510         lreq->watch_valid_thru = jiffies;
4511
4512         ceph_oid_copy(&lreq->t.base_oid, oid);
4513         ceph_oloc_copy(&lreq->t.base_oloc, oloc);
4514         lreq->t.flags = CEPH_OSD_FLAG_WRITE;
4515         ktime_get_real_ts64(&lreq->mtime);
4516
4517         lreq->reg_req = alloc_linger_request(lreq);
4518         if (!lreq->reg_req) {
4519                 ret = -ENOMEM;
4520                 goto err_put_lreq;
4521         }
4522
4523         lreq->ping_req = alloc_linger_request(lreq);
4524         if (!lreq->ping_req) {
4525                 ret = -ENOMEM;
4526                 goto err_put_lreq;
4527         }
4528
4529         down_write(&osdc->lock);
4530         linger_register(lreq); /* before osd_req_op_* */
4531         osd_req_op_watch_init(lreq->reg_req, 0, lreq->linger_id,
4532                               CEPH_OSD_WATCH_OP_WATCH);
4533         osd_req_op_watch_init(lreq->ping_req, 0, lreq->linger_id,
4534                               CEPH_OSD_WATCH_OP_PING);
4535         linger_submit(lreq);
4536         up_write(&osdc->lock);
4537
4538         ret = linger_reg_commit_wait(lreq);
4539         if (ret) {
4540                 linger_cancel(lreq);
4541                 goto err_put_lreq;
4542         }
4543
4544         return lreq;
4545
4546 err_put_lreq:
4547         linger_put(lreq);
4548         return ERR_PTR(ret);
4549 }
4550 EXPORT_SYMBOL(ceph_osdc_watch);
4551
4552 /*
4553  * Releases a ref.
4554  *
4555  * Times out after mount_timeout to preserve rbd unmap behaviour
4556  * introduced in 2894e1d76974 ("rbd: timeout watch teardown on unmap
4557  * with mount_timeout").
4558  */
4559 int ceph_osdc_unwatch(struct ceph_osd_client *osdc,
4560                       struct ceph_osd_linger_request *lreq)
4561 {
4562         struct ceph_options *opts = osdc->client->options;
4563         struct ceph_osd_request *req;
4564         int ret;
4565
4566         req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
4567         if (!req)
4568                 return -ENOMEM;
4569
4570         ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
4571         ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
4572         req->r_flags = CEPH_OSD_FLAG_WRITE;
4573         ktime_get_real_ts64(&req->r_mtime);
4574         osd_req_op_watch_init(req, 0, lreq->linger_id,
4575                               CEPH_OSD_WATCH_OP_UNWATCH);
4576
4577         ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
4578         if (ret)
4579                 goto out_put_req;
4580
4581         ceph_osdc_start_request(osdc, req, false);
4582         linger_cancel(lreq);
4583         linger_put(lreq);
4584         ret = wait_request_timeout(req, opts->mount_timeout);
4585
4586 out_put_req:
4587         ceph_osdc_put_request(req);
4588         return ret;
4589 }
4590 EXPORT_SYMBOL(ceph_osdc_unwatch);
4591
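/*
 * Encode a NOTIFY_ACK op: the request data is a pagelist holding the
 * notify_id, the watch cookie and a length-prefixed reply payload
 * (length 0 if there is none).
 */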
4592 static int osd_req_op_notify_ack_init(struct ceph_osd_request *req, int which,
4593                                       u64 notify_id, u64 cookie, void *payload,
4594                                       u32 payload_len)
4595 {
4596         struct ceph_osd_req_op *op;
4597         struct ceph_pagelist *pl;
4598         int ret;
4599
4600         op = _osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY_ACK, 0);
4601
4602         pl = kmalloc(sizeof(*pl), GFP_NOIO);
4603         if (!pl)
4604                 return -ENOMEM;
4605
4606         ceph_pagelist_init(pl);
4607         ret = ceph_pagelist_encode_64(pl, notify_id);
4608         ret |= ceph_pagelist_encode_64(pl, cookie);
4609         if (payload) {
4610                 ret |= ceph_pagelist_encode_32(pl, payload_len);
4611                 ret |= ceph_pagelist_append(pl, payload, payload_len);
4612         } else {
4613                 ret |= ceph_pagelist_encode_32(pl, 0);
4614         }
4615         if (ret) {
4616                 ceph_pagelist_release(pl);
4617                 return -ENOMEM;
4618         }
4619
4620         ceph_osd_data_pagelist_init(&op->notify_ack.request_data, pl);
4621         op->indata_len = pl->length;
4622         return 0;
4623 }
4624
4625 int ceph_osdc_notify_ack(struct ceph_osd_client *osdc,
4626                          struct ceph_object_id *oid,
4627                          struct ceph_object_locator *oloc,
4628                          u64 notify_id,
4629                          u64 cookie,
4630                          void *payload,
4631                          u32 payload_len)
4632 {
4633         struct ceph_osd_request *req;
4634         int ret;
4635
4636         req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
4637         if (!req)
4638                 return -ENOMEM;
4639
4640         ceph_oid_copy(&req->r_base_oid, oid);
4641         ceph_oloc_copy(&req->r_base_oloc, oloc);
4642         req->r_flags = CEPH_OSD_FLAG_READ;
4643
4644         ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
4645         if (ret)
4646                 goto out_put_req;
4647
4648         ret = osd_req_op_notify_ack_init(req, 0, notify_id, cookie, payload,
4649                                          payload_len);
4650         if (ret)
4651                 goto out_put_req;
4652
4653         ceph_osdc_start_request(osdc, req, false);
4654         ret = ceph_osdc_wait_request(osdc, req);
4655
4656 out_put_req:
4657         ceph_osdc_put_request(req);
4658         return ret;
4659 }
4660 EXPORT_SYMBOL(ceph_osdc_notify_ack);
4661
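/*
 * Encode a NOTIFY op: protocol version 1, the notify timeout in
 * seconds and the length-prefixed payload, all carried in a pagelist.
 */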
4662 static int osd_req_op_notify_init(struct ceph_osd_request *req, int which,
4663                                   u64 cookie, u32 prot_ver, u32 timeout,
4664                                   void *payload, u32 payload_len)
4665 {
4666         struct ceph_osd_req_op *op;
4667         struct ceph_pagelist *pl;
4668         int ret;
4669
4670         op = _osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY, 0);
4671         op->notify.cookie = cookie;
4672
4673         pl = kmalloc(sizeof(*pl), GFP_NOIO);
4674         if (!pl)
4675                 return -ENOMEM;
4676
4677         ceph_pagelist_init(pl);
4678         ret = ceph_pagelist_encode_32(pl, 1); /* prot_ver */
4679         ret |= ceph_pagelist_encode_32(pl, timeout);
4680         ret |= ceph_pagelist_encode_32(pl, payload_len);
4681         ret |= ceph_pagelist_append(pl, payload, payload_len);
4682         if (ret) {
4683                 ceph_pagelist_release(pl);
4684                 return -ENOMEM;
4685         }
4686
4687         ceph_osd_data_pagelist_init(&op->notify.request_data, pl);
4688         op->indata_len = pl->length;
4689         return 0;
4690 }
4691
4692 /*
4693  * @timeout: in seconds
4694  *
4695  * @preply_{pages,len} are initialized both on success and error.
4696  * The caller is responsible for:
4697  *
4698  *     ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len))
4699  */
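/*
 * Illustrative caller sketch (buf, buf_len and the 30 second timeout
 * are placeholders):
 *
 *     ret = ceph_osdc_notify(osdc, oid, oloc, buf, buf_len, 30,
 *                            &reply_pages, &reply_len);
 *     ...
 *     ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len));
 */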
4700 int ceph_osdc_notify(struct ceph_osd_client *osdc,
4701                      struct ceph_object_id *oid,
4702                      struct ceph_object_locator *oloc,
4703                      void *payload,
4704                      u32 payload_len,
4705                      u32 timeout,
4706                      struct page ***preply_pages,
4707                      size_t *preply_len)
4708 {
4709         struct ceph_osd_linger_request *lreq;
4710         struct page **pages;
4711         int ret;
4712
4713         WARN_ON(!timeout);
4714         if (preply_pages) {
4715                 *preply_pages = NULL;
4716                 *preply_len = 0;
4717         }
4718
4719         lreq = linger_alloc(osdc);
4720         if (!lreq)
4721                 return -ENOMEM;
4722
4723         lreq->preply_pages = preply_pages;
4724         lreq->preply_len = preply_len;
4725
4726         ceph_oid_copy(&lreq->t.base_oid, oid);
4727         ceph_oloc_copy(&lreq->t.base_oloc, oloc);
4728         lreq->t.flags = CEPH_OSD_FLAG_READ;
4729
4730         lreq->reg_req = alloc_linger_request(lreq);
4731         if (!lreq->reg_req) {
4732                 ret = -ENOMEM;
4733                 goto out_put_lreq;
4734         }
4735
4736         /* for notify_id */
4737         pages = ceph_alloc_page_vector(1, GFP_NOIO);
4738         if (IS_ERR(pages)) {
4739                 ret = PTR_ERR(pages);
4740                 goto out_put_lreq;
4741         }
4742
4743         down_write(&osdc->lock);
4744         linger_register(lreq); /* before osd_req_op_* */
4745         ret = osd_req_op_notify_init(lreq->reg_req, 0, lreq->linger_id, 1,
4746                                      timeout, payload, payload_len);
4747         if (ret) {
4748                 linger_unregister(lreq);
4749                 up_write(&osdc->lock);
4750                 ceph_release_page_vector(pages, 1);
4751                 goto out_put_lreq;
4752         }
4753         ceph_osd_data_pages_init(osd_req_op_data(lreq->reg_req, 0, notify,
4754                                                  response_data),
4755                                  pages, PAGE_SIZE, 0, false, true);
4756         linger_submit(lreq);
4757         up_write(&osdc->lock);
4758
4759         ret = linger_reg_commit_wait(lreq);
4760         if (!ret)
4761                 ret = linger_notify_finish_wait(lreq);
4762         else
4763                 dout("lreq %p failed to initiate notify %d\n", lreq, ret);
4764
4765         linger_cancel(lreq);
4766 out_put_lreq:
4767         linger_put(lreq);
4768         return ret;
4769 }
4770 EXPORT_SYMBOL(ceph_osdc_notify);
4771
4772 /*
4773  * Return the number of milliseconds since the watch was last
4774  * confirmed, or an error.  If there is an error, the watch is no
4775  * longer valid, and should be destroyed with ceph_osdc_unwatch().
4776  */
4777 int ceph_osdc_watch_check(struct ceph_osd_client *osdc,
4778                           struct ceph_osd_linger_request *lreq)
4779 {
4780         unsigned long stamp, age;
4781         int ret;
4782
4783         down_read(&osdc->lock);
4784         mutex_lock(&lreq->lock);
4785         stamp = lreq->watch_valid_thru;
4786         if (!list_empty(&lreq->pending_lworks)) {
4787                 struct linger_work *lwork =
4788                     list_first_entry(&lreq->pending_lworks,
4789                                      struct linger_work,
4790                                      pending_item);
4791
4792                 if (time_before(lwork->queued_stamp, stamp))
4793                         stamp = lwork->queued_stamp;
4794         }
4795         age = jiffies - stamp;
4796         dout("%s lreq %p linger_id %llu age %lu last_error %d\n", __func__,
4797              lreq, lreq->linger_id, age, lreq->last_error);
4798         /* we are truncating to msecs, so return a safe upper bound */
4799         ret = lreq->last_error ?: 1 + jiffies_to_msecs(age);
4800
4801         mutex_unlock(&lreq->lock);
4802         up_read(&osdc->lock);
4803         return ret;
4804 }
4805
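/*
 * Decode a single watch_item_t: entity name, cookie, timeout_seconds
 * (skipped) and, from struct v2 on, the watcher's address.
 */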
4806 static int decode_watcher(void **p, void *end, struct ceph_watch_item *item)
4807 {
4808         u8 struct_v;
4809         u32 struct_len;
4810         int ret;
4811
4812         ret = ceph_start_decoding(p, end, 2, "watch_item_t",
4813                                   &struct_v, &struct_len);
4814         if (ret)
4815                 return ret;
4816
4817         ceph_decode_copy(p, &item->name, sizeof(item->name));
4818         item->cookie = ceph_decode_64(p);
4819         *p += 4; /* skip timeout_seconds */
4820         if (struct_v >= 2) {
4821                 ceph_decode_copy(p, &item->addr, sizeof(item->addr));
4822                 ceph_decode_addr(&item->addr);
4823         }
4824
4825         dout("%s %s%llu cookie %llu addr %s\n", __func__,
4826              ENTITY_NAME(item->name), item->cookie,
4827              ceph_pr_addr(&item->addr.in_addr));
4828         return 0;
4829 }
4830
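/*
 * Decode an obj_list_watch_response_t into a kcalloc'ed array of
 * ceph_watch_item.  The array is freed here on a decode error and by
 * the caller on success.
 */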
4831 static int decode_watchers(void **p, void *end,
4832                            struct ceph_watch_item **watchers,
4833                            u32 *num_watchers)
4834 {
4835         u8 struct_v;
4836         u32 struct_len;
4837         int i;
4838         int ret;
4839
4840         ret = ceph_start_decoding(p, end, 1, "obj_list_watch_response_t",
4841                                   &struct_v, &struct_len);
4842         if (ret)
4843                 return ret;
4844
4845         *num_watchers = ceph_decode_32(p);
4846         *watchers = kcalloc(*num_watchers, sizeof(**watchers), GFP_NOIO);
4847         if (!*watchers)
4848                 return -ENOMEM;
4849
4850         for (i = 0; i < *num_watchers; i++) {
4851                 ret = decode_watcher(p, end, *watchers + i);
4852                 if (ret) {
4853                         kfree(*watchers);
4854                         return ret;
4855                 }
4856         }
4857
4858         return 0;
4859 }
4860
4861 /*
4862  * On success, the caller is responsible for:
4863  *
4864  *     kfree(watchers);
4865  */
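/*
 * Illustrative caller sketch:
 *
 *     ret = ceph_osdc_list_watchers(osdc, oid, oloc, &watchers,
 *                                   &num_watchers);
 *     if (ret)
 *             return ret;
 *     ... inspect watchers[0 .. num_watchers - 1] ...
 *     kfree(watchers);
 */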
4866 int ceph_osdc_list_watchers(struct ceph_osd_client *osdc,
4867                             struct ceph_object_id *oid,
4868                             struct ceph_object_locator *oloc,
4869                             struct ceph_watch_item **watchers,
4870                             u32 *num_watchers)
4871 {
4872         struct ceph_osd_request *req;
4873         struct page **pages;
4874         int ret;
4875
4876         req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
4877         if (!req)
4878                 return -ENOMEM;
4879
4880         ceph_oid_copy(&req->r_base_oid, oid);
4881         ceph_oloc_copy(&req->r_base_oloc, oloc);
4882         req->r_flags = CEPH_OSD_FLAG_READ;
4883
4884         ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
4885         if (ret)
4886                 goto out_put_req;
4887
4888         pages = ceph_alloc_page_vector(1, GFP_NOIO);
4889         if (IS_ERR(pages)) {
4890                 ret = PTR_ERR(pages);
4891                 goto out_put_req;
4892         }
4893
4894         osd_req_op_init(req, 0, CEPH_OSD_OP_LIST_WATCHERS, 0);
4895         ceph_osd_data_pages_init(osd_req_op_data(req, 0, list_watchers,
4896                                                  response_data),
4897                                  pages, PAGE_SIZE, 0, false, true);
4898
4899         ceph_osdc_start_request(osdc, req, false);
4900         ret = ceph_osdc_wait_request(osdc, req);
4901         if (ret >= 0) {
4902                 void *p = page_address(pages[0]);
4903                 void *const end = p + req->r_ops[0].outdata_len;
4904
4905                 ret = decode_watchers(&p, end, watchers, num_watchers);
4906         }
4907
4908 out_put_req:
4909         ceph_osdc_put_request(req);
4910         return ret;
4911 }
4912 EXPORT_SYMBOL(ceph_osdc_list_watchers);
4913
4914 /*
4915  * Call all pending notify callbacks - for use after a watch is
4916  * unregistered, to make sure no more callbacks for it will be invoked.
4917  */
4918 void ceph_osdc_flush_notifies(struct ceph_osd_client *osdc)
4919 {
4920         dout("%s osdc %p\n", __func__, osdc);
4921         flush_workqueue(osdc->notify_wq);
4922 }
4923 EXPORT_SYMBOL(ceph_osdc_flush_notifies);
4924
4925 void ceph_osdc_maybe_request_map(struct ceph_osd_client *osdc)
4926 {
4927         down_read(&osdc->lock);
4928         maybe_request_map(osdc);
4929         up_read(&osdc->lock);
4930 }
4931 EXPORT_SYMBOL(ceph_osdc_maybe_request_map);
4932
4933 /*
4934  * Execute an OSD class method on an object.
4935  *
4936  * @flags: CEPH_OSD_FLAG_*
4937  * @resp_len: in/out param for reply length
4938  */
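/*
 * Illustrative sketch ("lock"/"get_info" stand in for any class and
 * method; request and reply lengths are each capped at one page):
 *
 *     reply_len = PAGE_SIZE;
 *     ret = ceph_osdc_call(osdc, oid, oloc, "lock", "get_info",
 *                          CEPH_OSD_FLAG_READ, req_page, req_len,
 *                          reply_page, &reply_len);
 */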
4939 int ceph_osdc_call(struct ceph_osd_client *osdc,
4940                    struct ceph_object_id *oid,
4941                    struct ceph_object_locator *oloc,
4942                    const char *class, const char *method,
4943                    unsigned int flags,
4944                    struct page *req_page, size_t req_len,
4945                    struct page *resp_page, size_t *resp_len)
4946 {
4947         struct ceph_osd_request *req;
4948         int ret;
4949
4950         if (req_len > PAGE_SIZE || (resp_page && *resp_len > PAGE_SIZE))
4951                 return -E2BIG;
4952
4953         req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
4954         if (!req)
4955                 return -ENOMEM;
4956
4957         ceph_oid_copy(&req->r_base_oid, oid);
4958         ceph_oloc_copy(&req->r_base_oloc, oloc);
4959         req->r_flags = flags;
4960
4961         ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
4962         if (ret)
4963                 goto out_put_req;
4964
4965         ret = osd_req_op_cls_init(req, 0, CEPH_OSD_OP_CALL, class, method);
4966         if (ret)
4967                 goto out_put_req;
4968
4969         if (req_page)
4970                 osd_req_op_cls_request_data_pages(req, 0, &req_page, req_len,
4971                                                   0, false, false);
4972         if (resp_page)
4973                 osd_req_op_cls_response_data_pages(req, 0, &resp_page,
4974                                                    *resp_len, 0, false, false);
4975
4976         ceph_osdc_start_request(osdc, req, false);
4977         ret = ceph_osdc_wait_request(osdc, req);
4978         if (ret >= 0) {
4979                 ret = req->r_ops[0].rval;
4980                 if (resp_page)
4981                         *resp_len = req->r_ops[0].outdata_len;
4982         }
4983
4984 out_put_req:
4985         ceph_osdc_put_request(req);
4986         return ret;
4987 }
4988 EXPORT_SYMBOL(ceph_osdc_call);
4989
4990 /*
4991  * init, shutdown
4992  */
4993 int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
4994 {
4995         int err;
4996
4997         dout("init\n");
4998         osdc->client = client;
4999         init_rwsem(&osdc->lock);
5000         osdc->osds = RB_ROOT;
5001         INIT_LIST_HEAD(&osdc->osd_lru);
5002         spin_lock_init(&osdc->osd_lru_lock);
5003         osd_init(&osdc->homeless_osd);
5004         osdc->homeless_osd.o_osdc = osdc;
5005         osdc->homeless_osd.o_osd = CEPH_HOMELESS_OSD;
5006         osdc->last_linger_id = CEPH_LINGER_ID_START;
5007         osdc->linger_requests = RB_ROOT;
5008         osdc->map_checks = RB_ROOT;
5009         osdc->linger_map_checks = RB_ROOT;
5010         INIT_DELAYED_WORK(&osdc->timeout_work, handle_timeout);
5011         INIT_DELAYED_WORK(&osdc->osds_timeout_work, handle_osds_timeout);
5012
5013         err = -ENOMEM;
5014         osdc->osdmap = ceph_osdmap_alloc();
5015         if (!osdc->osdmap)
5016                 goto out;
5017
5018         osdc->req_mempool = mempool_create_slab_pool(10,
5019                                                      ceph_osd_request_cache);
5020         if (!osdc->req_mempool)
5021                 goto out_map;
5022
5023         err = ceph_msgpool_init(&osdc->msgpool_op, CEPH_MSG_OSD_OP,
5024                                 PAGE_SIZE, 10, true, "osd_op");
5025         if (err < 0)
5026                 goto out_mempool;
5027         err = ceph_msgpool_init(&osdc->msgpool_op_reply, CEPH_MSG_OSD_OPREPLY,
5028                                 PAGE_SIZE, 10, true, "osd_op_reply");
5029         if (err < 0)
5030                 goto out_msgpool;
5031
5032         err = -ENOMEM;
5033         osdc->notify_wq = create_singlethread_workqueue("ceph-watch-notify");
5034         if (!osdc->notify_wq)
5035                 goto out_msgpool_reply;
5036
5037         osdc->completion_wq = create_singlethread_workqueue("ceph-completion");
5038         if (!osdc->completion_wq)
5039                 goto out_notify_wq;
5040
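        /* kick off the periodic request-timeout and idle-OSD checks */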
5041         schedule_delayed_work(&osdc->timeout_work,
5042                               osdc->client->options->osd_keepalive_timeout);
5043         schedule_delayed_work(&osdc->osds_timeout_work,
5044             round_jiffies_relative(osdc->client->options->osd_idle_ttl));
5045
5046         return 0;
5047
5048 out_notify_wq:
5049         destroy_workqueue(osdc->notify_wq);
5050 out_msgpool_reply:
5051         ceph_msgpool_destroy(&osdc->msgpool_op_reply);
5052 out_msgpool:
5053         ceph_msgpool_destroy(&osdc->msgpool_op);
5054 out_mempool:
5055         mempool_destroy(osdc->req_mempool);
5056 out_map:
5057         ceph_osdmap_destroy(osdc->osdmap);
5058 out:
5059         return err;
5060 }
5061
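/*
 * Tear down an osd_client: stop background work, close all OSD
 * sessions and verify that no requests or linger requests were left
 * behind.
 */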
5062 void ceph_osdc_stop(struct ceph_osd_client *osdc)
5063 {
5064         destroy_workqueue(osdc->completion_wq);
5065         destroy_workqueue(osdc->notify_wq);
5066         cancel_delayed_work_sync(&osdc->timeout_work);
5067         cancel_delayed_work_sync(&osdc->osds_timeout_work);
5068
5069         down_write(&osdc->lock);
5070         while (!RB_EMPTY_ROOT(&osdc->osds)) {
5071                 struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds),
5072                                                 struct ceph_osd, o_node);
5073                 close_osd(osd);
5074         }
5075         up_write(&osdc->lock);
5076         WARN_ON(refcount_read(&osdc->homeless_osd.o_ref) != 1);
5077         osd_cleanup(&osdc->homeless_osd);
5078
5079         WARN_ON(!list_empty(&osdc->osd_lru));
5080         WARN_ON(!RB_EMPTY_ROOT(&osdc->linger_requests));
5081         WARN_ON(!RB_EMPTY_ROOT(&osdc->map_checks));
5082         WARN_ON(!RB_EMPTY_ROOT(&osdc->linger_map_checks));
5083         WARN_ON(atomic_read(&osdc->num_requests));
5084         WARN_ON(atomic_read(&osdc->num_homeless));
5085
5086         ceph_osdmap_destroy(osdc->osdmap);
5087         mempool_destroy(osdc->req_mempool);
5088         ceph_msgpool_destroy(&osdc->msgpool_op);
5089         ceph_msgpool_destroy(&osdc->msgpool_op_reply);
5090 }
5091
5092 /*
5093  * Read some contiguous pages.  If we cross a stripe boundary, shorten
5094  * *plen.  Return number of bytes read, or error.
5095  */
5096 int ceph_osdc_readpages(struct ceph_osd_client *osdc,
5097                         struct ceph_vino vino, struct ceph_file_layout *layout,
5098                         u64 off, u64 *plen,
5099                         u32 truncate_seq, u64 truncate_size,
5100                         struct page **pages, int num_pages, int page_align)
5101 {
5102         struct ceph_osd_request *req;
5103         int rc = 0;
5104
5105         dout("readpages on ino %llx.%llx on %llu~%llu\n", vino.ino,
5106              vino.snap, off, *plen);
5107         req = ceph_osdc_new_request(osdc, layout, vino, off, plen, 0, 1,
5108                                     CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
5109                                     NULL, truncate_seq, truncate_size,
5110                                     false);
5111         if (IS_ERR(req))
5112                 return PTR_ERR(req);
5113
5114         /* it may be a short read due to an object boundary */
5115         osd_req_op_extent_osd_data_pages(req, 0,
5116                                 pages, *plen, page_align, false, false);
5117
5118         dout("readpages  final extent is %llu~%llu (%llu bytes align %d)\n",
5119              off, *plen, *plen, page_align);
5120
5121         rc = ceph_osdc_start_request(osdc, req, false);
5122         if (!rc)
5123                 rc = ceph_osdc_wait_request(osdc, req);
5124
5125         ceph_osdc_put_request(req);
5126         dout("readpages result %d\n", rc);
5127         return rc;
5128 }
5129 EXPORT_SYMBOL(ceph_osdc_readpages);
5130
5131 /*
5132  * do a synchronous write on N pages
5133  */
5134 int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino,
5135                          struct ceph_file_layout *layout,
5136                          struct ceph_snap_context *snapc,
5137                          u64 off, u64 len,
5138                          u32 truncate_seq, u64 truncate_size,
5139                          struct timespec64 *mtime,
5140                          struct page **pages, int num_pages)
5141 {
5142         struct ceph_osd_request *req;
5143         int rc = 0;
5144         int page_align = off & ~PAGE_MASK;
5145
5146         req = ceph_osdc_new_request(osdc, layout, vino, off, &len, 0, 1,
5147                                     CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE,
5148                                     snapc, truncate_seq, truncate_size,
5149                                     true);
5150         if (IS_ERR(req))
5151                 return PTR_ERR(req);
5152
5153         /* it may be a short write due to an object boundary */
5154         osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_align,
5155                                 false, false);
5156         dout("writepages %llu~%llu (%llu bytes)\n", off, len, len);
5157
5158         req->r_mtime = *mtime;
5159         rc = ceph_osdc_start_request(osdc, req, true);
5160         if (!rc)
5161                 rc = ceph_osdc_wait_request(osdc, req);
5162
5163         ceph_osdc_put_request(req);
5164         if (rc == 0)
5165                 rc = len;
5166         dout("writepages result %d\n", rc);
5167         return rc;
5168 }
5169 EXPORT_SYMBOL(ceph_osdc_writepages);
5170
5171 int __init ceph_osdc_setup(void)
5172 {
5173         size_t size = sizeof(struct ceph_osd_request) +
5174             CEPH_OSD_SLAB_OPS * sizeof(struct ceph_osd_req_op);
5175
5176         BUG_ON(ceph_osd_request_cache);
5177         ceph_osd_request_cache = kmem_cache_create("ceph_osd_request", size,
5178                                                    0, 0, NULL);
5179
5180         return ceph_osd_request_cache ? 0 : -ENOMEM;
5181 }
5182
5183 void ceph_osdc_cleanup(void)
5184 {
5185         BUG_ON(!ceph_osd_request_cache);
5186         kmem_cache_destroy(ceph_osd_request_cache);
5187         ceph_osd_request_cache = NULL;
5188 }
5189
5190 /*
5191  * handle incoming message
5192  */
5193 static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
5194 {
5195         struct ceph_osd *osd = con->private;
5196         struct ceph_osd_client *osdc = osd->o_osdc;
5197         int type = le16_to_cpu(msg->hdr.type);
5198
5199         switch (type) {
5200         case CEPH_MSG_OSD_MAP:
5201                 ceph_osdc_handle_map(osdc, msg);
5202                 break;
5203         case CEPH_MSG_OSD_OPREPLY:
5204                 handle_reply(osd, msg);
5205                 break;
5206         case CEPH_MSG_OSD_BACKOFF:
5207                 handle_backoff(osd, msg);
5208                 break;
5209         case CEPH_MSG_WATCH_NOTIFY:
5210                 handle_watch_notify(osdc, msg);
5211                 break;
5212
5213         default:
5214                 pr_err("received unknown message type %d %s\n", type,
5215                        ceph_msg_type_name(type));
5216         }
5217
5218         ceph_msg_put(msg);
5219 }
5220
5221 /*
5222  * Look up and return the preallocated reply message for an incoming
5223  * OSD op reply.  Don't try to handle a data portion larger than what
5224  * was preallocated - for now, just skip the message.
5225  */
5226 static struct ceph_msg *get_reply(struct ceph_connection *con,
5227                                   struct ceph_msg_header *hdr,
5228                                   int *skip)
5229 {
5230         struct ceph_osd *osd = con->private;
5231         struct ceph_osd_client *osdc = osd->o_osdc;
5232         struct ceph_msg *m = NULL;
5233         struct ceph_osd_request *req;
5234         int front_len = le32_to_cpu(hdr->front_len);
5235         int data_len = le32_to_cpu(hdr->data_len);
5236         u64 tid = le64_to_cpu(hdr->tid);
5237
5238         down_read(&osdc->lock);
5239         if (!osd_registered(osd)) {
5240                 dout("%s osd%d unknown, skipping\n", __func__, osd->o_osd);
5241                 *skip = 1;
5242                 goto out_unlock_osdc;
5243         }
5244         WARN_ON(osd->o_osd != le64_to_cpu(hdr->src.num));
5245
5246         mutex_lock(&osd->lock);
5247         req = lookup_request(&osd->o_requests, tid);
5248         if (!req) {
5249                 dout("%s osd%d tid %llu unknown, skipping\n", __func__,
5250                      osd->o_osd, tid);
5251                 *skip = 1;
5252                 goto out_unlock_session;
5253         }
5254
5255         ceph_msg_revoke_incoming(req->r_reply);
5256
5257         if (front_len > req->r_reply->front_alloc_len) {
5258                 pr_warn("%s osd%d tid %llu front %d > preallocated %d\n",
5259                         __func__, osd->o_osd, req->r_tid, front_len,
5260                         req->r_reply->front_alloc_len);
5261                 m = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, front_len, GFP_NOFS,
5262                                  false);
5263                 if (!m)
5264                         goto out_unlock_session;
5265                 ceph_msg_put(req->r_reply);
5266                 req->r_reply = m;
5267         }
5268
5269         if (data_len > req->r_reply->data_length) {
5270                 pr_warn("%s osd%d tid %llu data %d > preallocated %zu, skipping\n",
5271                         __func__, osd->o_osd, req->r_tid, data_len,
5272                         req->r_reply->data_length);
5273                 m = NULL;
5274                 *skip = 1;
5275                 goto out_unlock_session;
5276         }
5277
5278         m = ceph_msg_get(req->r_reply);
5279         dout("get_reply tid %lld %p\n", tid, m);
5280
5281 out_unlock_session:
5282         mutex_unlock(&osd->lock);
5283 out_unlock_osdc:
5284         up_read(&osdc->lock);
5285         return m;
5286 }
5287
5288 /*
5289  * TODO: switch to a msg-owned pagelist
5290  */
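/*
 * Allocate an incoming message and, if a data payload is expected,
 * attach a freshly allocated page vector large enough to hold it.
 */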
5291 static struct ceph_msg *alloc_msg_with_page_vector(struct ceph_msg_header *hdr)
5292 {
5293         struct ceph_msg *m;
5294         int type = le16_to_cpu(hdr->type);
5295         u32 front_len = le32_to_cpu(hdr->front_len);
5296         u32 data_len = le32_to_cpu(hdr->data_len);
5297
5298         m = ceph_msg_new(type, front_len, GFP_NOIO, false);
5299         if (!m)
5300                 return NULL;
5301
5302         if (data_len) {
5303                 struct page **pages;
5304                 struct ceph_osd_data osd_data;
5305
5306                 pages = ceph_alloc_page_vector(calc_pages_for(0, data_len),
5307                                                GFP_NOIO);
5308                 if (IS_ERR(pages)) {
5309                         ceph_msg_put(m);
5310                         return NULL;
5311                 }
5312
5313                 ceph_osd_data_pages_init(&osd_data, pages, data_len, 0, false,
5314                                          false);
5315                 ceph_osdc_msg_data_add(m, &osd_data);
5316         }
5317
5318         return m;
5319 }
5320
5321 static struct ceph_msg *alloc_msg(struct ceph_connection *con,
5322                                   struct ceph_msg_header *hdr,
5323                                   int *skip)
5324 {
5325         struct ceph_osd *osd = con->private;
5326         int type = le16_to_cpu(hdr->type);
5327
5328         *skip = 0;
5329         switch (type) {
5330         case CEPH_MSG_OSD_MAP:
5331         case CEPH_MSG_OSD_BACKOFF:
5332         case CEPH_MSG_WATCH_NOTIFY:
5333                 return alloc_msg_with_page_vector(hdr);
5334         case CEPH_MSG_OSD_OPREPLY:
5335                 return get_reply(con, hdr, skip);
5336         default:
5337                 pr_warn("%s osd%d unknown msg type %d, skipping\n", __func__,
5338                         osd->o_osd, type);
5339                 *skip = 1;
5340                 return NULL;
5341         }
5342 }
5343
5344 /*
5345  * Wrappers to refcount containing ceph_osd struct
5346  */
5347 static struct ceph_connection *get_osd_con(struct ceph_connection *con)
5348 {
5349         struct ceph_osd *osd = con->private;
5350         if (get_osd(osd))
5351                 return con;
5352         return NULL;
5353 }
5354
5355 static void put_osd_con(struct ceph_connection *con)
5356 {
5357         struct ceph_osd *osd = con->private;
5358         put_osd(osd);
5359 }
5360
5361 /*
5362  * authentication
5363  */
5364 /*
5365  * Note: returned pointer is the address of a structure that's
5366  * managed separately.  Caller must *not* attempt to free it.
5367  */
5368 static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
5369                                         int *proto, int force_new)
5370 {
5371         struct ceph_osd *o = con->private;
5372         struct ceph_osd_client *osdc = o->o_osdc;
5373         struct ceph_auth_client *ac = osdc->client->monc.auth;
5374         struct ceph_auth_handshake *auth = &o->o_auth;
5375
5376         if (force_new && auth->authorizer) {
5377                 ceph_auth_destroy_authorizer(auth->authorizer);
5378                 auth->authorizer = NULL;
5379         }
5380         if (!auth->authorizer) {
5381                 int ret = ceph_auth_create_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
5382                                                       auth);
5383                 if (ret)
5384                         return ERR_PTR(ret);
5385         } else {
5386                 int ret = ceph_auth_update_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
5387                                                      auth);
5388                 if (ret)
5389                         return ERR_PTR(ret);
5390         }
5391         *proto = ac->protocol;
5392
5393         return auth;
5394 }
5395
5396 static int add_authorizer_challenge(struct ceph_connection *con,
5397                                     void *challenge_buf, int challenge_buf_len)
5398 {
5399         struct ceph_osd *o = con->private;
5400         struct ceph_osd_client *osdc = o->o_osdc;
5401         struct ceph_auth_client *ac = osdc->client->monc.auth;
5402
5403         return ceph_auth_add_authorizer_challenge(ac, o->o_auth.authorizer,
5404                                             challenge_buf, challenge_buf_len);
5405 }
5406
5407 static int verify_authorizer_reply(struct ceph_connection *con)
5408 {
5409         struct ceph_osd *o = con->private;
5410         struct ceph_osd_client *osdc = o->o_osdc;
5411         struct ceph_auth_client *ac = osdc->client->monc.auth;
5412
5413         return ceph_auth_verify_authorizer_reply(ac, o->o_auth.authorizer);
5414 }
5415
5416 static int invalidate_authorizer(struct ceph_connection *con)
5417 {
5418         struct ceph_osd *o = con->private;
5419         struct ceph_osd_client *osdc = o->o_osdc;
5420         struct ceph_auth_client *ac = osdc->client->monc.auth;
5421
5422         ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_OSD);
5423         return ceph_monc_validate_auth(&osdc->client->monc);
5424 }
5425
5426 static void osd_reencode_message(struct ceph_msg *msg)
5427 {
5428         int type = le16_to_cpu(msg->hdr.type);
5429
5430         if (type == CEPH_MSG_OSD_OP)
5431                 encode_request_finish(msg);
5432 }
5433
5434 static int osd_sign_message(struct ceph_msg *msg)
5435 {
5436         struct ceph_osd *o = msg->con->private;
5437         struct ceph_auth_handshake *auth = &o->o_auth;
5438
5439         return ceph_auth_sign_message(auth, msg);
5440 }
5441
5442 static int osd_check_message_signature(struct ceph_msg *msg)
5443 {
5444         struct ceph_osd *o = msg->con->private;
5445         struct ceph_auth_handshake *auth = &o->o_auth;
5446
5447         return ceph_auth_check_message_signature(auth, msg);
5448 }
5449
5450 static const struct ceph_connection_operations osd_con_ops = {
5451         .get = get_osd_con,
5452         .put = put_osd_con,
5453         .dispatch = dispatch,
5454         .get_authorizer = get_authorizer,
5455         .add_authorizer_challenge = add_authorizer_challenge,
5456         .verify_authorizer_reply = verify_authorizer_reply,
5457         .invalidate_authorizer = invalidate_authorizer,
5458         .alloc_msg = alloc_msg,
5459         .reencode_message = osd_reencode_message,
5460         .sign_message = osd_sign_message,
5461         .check_message_signature = osd_check_message_signature,
5462         .fault = osd_fault,
5463 };