// SPDX-License-Identifier: GPL-2.0

#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#ifdef CONFIG_BLOCK
#include <linux/bio.h>
#endif

#include <linux/ceph/ceph_features.h>
#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/auth.h>
#include <linux/ceph/pagelist.h>
#include <linux/ceph/striper.h>

#define OSD_OPREPLY_FRONT_LEN	512

static struct kmem_cache	*ceph_osd_request_cache;

static const struct ceph_connection_operations osd_con_ops;
/*
 * Implement client access to distributed object storage cluster.
 *
 * All data objects are stored within a cluster/cloud of OSDs, or
 * "object storage devices."  (Note that Ceph OSDs have _nothing_ to
 * do with the T10 OSD extensions to SCSI.)  Ceph OSDs are simply
 * remote daemons serving up and coordinating consistent and safe
 * object storage within the cluster.
 *
 * Cluster membership and the mapping of data objects onto storage devices
 * are described by the osd map.
 *
 * We keep track of pending OSD requests (read, write), resubmit
 * requests to different OSDs when the cluster topology/data layout
 * change, or retry the affected requests when the communications
 * channel with an OSD is reset.
 */

static void link_request(struct ceph_osd *osd, struct ceph_osd_request *req);
static void unlink_request(struct ceph_osd *osd, struct ceph_osd_request *req);
static void link_linger(struct ceph_osd *osd,
			struct ceph_osd_linger_request *lreq);
static void unlink_linger(struct ceph_osd *osd,
			  struct ceph_osd_linger_request *lreq);
static void clear_backoffs(struct ceph_osd *osd);
#if 1
static inline bool rwsem_is_wrlocked(struct rw_semaphore *sem)
{
	bool wrlocked = true;

	if (unlikely(down_read_trylock(sem))) {
		wrlocked = false;
		up_read(sem);
	}

	return wrlocked;
}
static inline void verify_osdc_locked(struct ceph_osd_client *osdc)
{
	WARN_ON(!rwsem_is_locked(&osdc->lock));
}
static inline void verify_osdc_wrlocked(struct ceph_osd_client *osdc)
{
	WARN_ON(!rwsem_is_wrlocked(&osdc->lock));
}
static inline void verify_osd_locked(struct ceph_osd *osd)
{
	struct ceph_osd_client *osdc = osd->o_osdc;

	WARN_ON(!(mutex_is_locked(&osd->lock) &&
		  rwsem_is_locked(&osdc->lock)) &&
		!rwsem_is_wrlocked(&osdc->lock));
}
static inline void verify_lreq_locked(struct ceph_osd_linger_request *lreq)
{
	WARN_ON(!mutex_is_locked(&lreq->lock));
}
#else
static inline void verify_osdc_locked(struct ceph_osd_client *osdc) { }
static inline void verify_osdc_wrlocked(struct ceph_osd_client *osdc) { }
static inline void verify_osd_locked(struct ceph_osd *osd) { }
static inline void verify_lreq_locked(struct ceph_osd_linger_request *lreq) { }
#endif
/*
 * calculate the mapping of a file extent onto an object, and fill out the
 * request accordingly.  shorten extent as necessary if it crosses an
 * object boundary.
 *
 * fill osd op in request message.
 */
static int calc_layout(struct ceph_file_layout *layout, u64 off, u64 *plen,
			u64 *objnum, u64 *objoff, u64 *objlen)
{
	u64 orig_len = *plen;
	u32 xlen;

	/* object extent? */
	ceph_calc_file_object_mapping(layout, off, orig_len, objnum,
				      objoff, &xlen);
	*objlen = xlen;
	if (*objlen < orig_len) {
		*plen = *objlen;
		dout(" skipping last %llu, final file extent %llu~%llu\n",
		     orig_len - *plen, off, *plen);
	}

	dout("calc_layout objnum=%llx %llu~%llu\n", *objnum, *objoff, *objlen);
	return 0;
}
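/*
 * Worked example (illustrative, not from the original file): with the
 * default 4M object size and stripe_unit == object_size, a request for
 * off=6M, *plen=4M maps to objnum=1, objoff=2M, objlen=2M; *plen is then
 * shortened to 2M so the op never crosses the boundary into object 2.
 */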
static void ceph_osd_data_init(struct ceph_osd_data *osd_data)
{
	memset(osd_data, 0, sizeof (*osd_data));
	osd_data->type = CEPH_OSD_DATA_TYPE_NONE;
}

static void ceph_osd_data_pages_init(struct ceph_osd_data *osd_data,
			struct page **pages, u64 length, u32 alignment,
			bool pages_from_pool, bool own_pages)
{
	osd_data->type = CEPH_OSD_DATA_TYPE_PAGES;
	osd_data->pages = pages;
	osd_data->length = length;
	osd_data->alignment = alignment;
	osd_data->pages_from_pool = pages_from_pool;
	osd_data->own_pages = own_pages;
}

static void ceph_osd_data_pagelist_init(struct ceph_osd_data *osd_data,
			struct ceph_pagelist *pagelist)
{
	osd_data->type = CEPH_OSD_DATA_TYPE_PAGELIST;
	osd_data->pagelist = pagelist;
}

#ifdef CONFIG_BLOCK
static void ceph_osd_data_bio_init(struct ceph_osd_data *osd_data,
				   struct ceph_bio_iter *bio_pos,
				   u32 bio_length)
{
	osd_data->type = CEPH_OSD_DATA_TYPE_BIO;
	osd_data->bio_pos = *bio_pos;
	osd_data->bio_length = bio_length;
}
#endif /* CONFIG_BLOCK */

static void ceph_osd_data_bvecs_init(struct ceph_osd_data *osd_data,
				     struct ceph_bvec_iter *bvec_pos)
{
	osd_data->type = CEPH_OSD_DATA_TYPE_BVECS;
	osd_data->bvec_pos = *bvec_pos;
}
#define osd_req_op_data(oreq, whch, typ, fld)				\
({									\
	struct ceph_osd_request *__oreq = (oreq);			\
	unsigned int __whch = (whch);					\
	BUG_ON(__whch >= __oreq->r_num_ops);				\
	&__oreq->r_ops[__whch].typ.fld;					\
})
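/*
 * For example (illustrative), osd_req_op_data(req, 0, extent, osd_data)
 * evaluates to &req->r_ops[0].extent.osd_data, after BUG_ON-checking that
 * op 0 is within r_num_ops.
 */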
static struct ceph_osd_data *
osd_req_op_raw_data_in(struct ceph_osd_request *osd_req, unsigned int which)
{
	BUG_ON(which >= osd_req->r_num_ops);

	return &osd_req->r_ops[which].raw_data_in;
}

struct ceph_osd_data *
osd_req_op_extent_osd_data(struct ceph_osd_request *osd_req,
			   unsigned int which)
{
	return osd_req_op_data(osd_req, which, extent, osd_data);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data);

void osd_req_op_raw_data_in_pages(struct ceph_osd_request *osd_req,
			unsigned int which, struct page **pages,
			u64 length, u32 alignment,
			bool pages_from_pool, bool own_pages)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_raw_data_in(osd_req, which);
	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
				pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_raw_data_in_pages);

void osd_req_op_extent_osd_data_pages(struct ceph_osd_request *osd_req,
			unsigned int which, struct page **pages,
			u64 length, u32 alignment,
			bool pages_from_pool, bool own_pages)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
				pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_pages);

void osd_req_op_extent_osd_data_pagelist(struct ceph_osd_request *osd_req,
			unsigned int which, struct ceph_pagelist *pagelist)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
	ceph_osd_data_pagelist_init(osd_data, pagelist);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_pagelist);

#ifdef CONFIG_BLOCK
void osd_req_op_extent_osd_data_bio(struct ceph_osd_request *osd_req,
				    unsigned int which,
				    struct ceph_bio_iter *bio_pos,
				    u32 bio_length)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
	ceph_osd_data_bio_init(osd_data, bio_pos, bio_length);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_bio);
#endif /* CONFIG_BLOCK */

void osd_req_op_extent_osd_data_bvec_pos(struct ceph_osd_request *osd_req,
					 unsigned int which,
					 struct ceph_bvec_iter *bvec_pos)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
	ceph_osd_data_bvecs_init(osd_data, bvec_pos);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_bvec_pos);
static void osd_req_op_cls_request_info_pagelist(
			struct ceph_osd_request *osd_req,
			unsigned int which, struct ceph_pagelist *pagelist)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, cls, request_info);
	ceph_osd_data_pagelist_init(osd_data, pagelist);
}

void osd_req_op_cls_request_data_pagelist(
			struct ceph_osd_request *osd_req,
			unsigned int which, struct ceph_pagelist *pagelist)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, cls, request_data);
	ceph_osd_data_pagelist_init(osd_data, pagelist);
	osd_req->r_ops[which].cls.indata_len += pagelist->length;
	osd_req->r_ops[which].indata_len += pagelist->length;
}
EXPORT_SYMBOL(osd_req_op_cls_request_data_pagelist);

void osd_req_op_cls_request_data_pages(struct ceph_osd_request *osd_req,
			unsigned int which, struct page **pages, u64 length,
			u32 alignment, bool pages_from_pool, bool own_pages)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, cls, request_data);
	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
				pages_from_pool, own_pages);
	osd_req->r_ops[which].cls.indata_len += length;
	osd_req->r_ops[which].indata_len += length;
}
EXPORT_SYMBOL(osd_req_op_cls_request_data_pages);

void osd_req_op_cls_request_data_bvecs(struct ceph_osd_request *osd_req,
				       unsigned int which,
				       struct bio_vec *bvecs, u32 bytes)
{
	struct ceph_osd_data *osd_data;
	struct ceph_bvec_iter it = {
		.bvecs = bvecs,
		.iter = { .bi_size = bytes },
	};

	osd_data = osd_req_op_data(osd_req, which, cls, request_data);
	ceph_osd_data_bvecs_init(osd_data, &it);
	osd_req->r_ops[which].cls.indata_len += bytes;
	osd_req->r_ops[which].indata_len += bytes;
}
EXPORT_SYMBOL(osd_req_op_cls_request_data_bvecs);

void osd_req_op_cls_response_data_pages(struct ceph_osd_request *osd_req,
			unsigned int which, struct page **pages, u64 length,
			u32 alignment, bool pages_from_pool, bool own_pages)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, cls, response_data);
	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
				pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_cls_response_data_pages);
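/*
 * Illustrative sketch (hypothetical caller, names are made up): a class
 * method call wires up all three cls data items via these helpers:
 *
 *	osd_req_op_cls_init(req, 0, CEPH_OSD_OP_CALL, "myclass", "mymethod");
 *	osd_req_op_cls_request_data_pagelist(req, 0, in_pagelist);
 *	osd_req_op_cls_response_data_pages(req, 0, pages, PAGE_SIZE, 0,
 *					   false, false);
 *
 * request_info (class and method names) is filled in by
 * osd_req_op_cls_init() itself; request_data and response_data are
 * optional and may stay CEPH_OSD_DATA_TYPE_NONE.
 */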
static u64 ceph_osd_data_length(struct ceph_osd_data *osd_data)
{
	switch (osd_data->type) {
	case CEPH_OSD_DATA_TYPE_NONE:
		return 0;
	case CEPH_OSD_DATA_TYPE_PAGES:
		return osd_data->length;
	case CEPH_OSD_DATA_TYPE_PAGELIST:
		return (u64)osd_data->pagelist->length;
#ifdef CONFIG_BLOCK
	case CEPH_OSD_DATA_TYPE_BIO:
		return (u64)osd_data->bio_length;
#endif /* CONFIG_BLOCK */
	case CEPH_OSD_DATA_TYPE_BVECS:
		return osd_data->bvec_pos.iter.bi_size;
	default:
		WARN(true, "unrecognized data type %d\n", (int)osd_data->type);
		return 0;
	}
}

static void ceph_osd_data_release(struct ceph_osd_data *osd_data)
{
	if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES && osd_data->own_pages) {
		int num_pages;

		num_pages = calc_pages_for((u64)osd_data->alignment,
						(u64)osd_data->length);
		ceph_release_page_vector(osd_data->pages, num_pages);
	}
	ceph_osd_data_init(osd_data);
}

static void osd_req_op_data_release(struct ceph_osd_request *osd_req,
			unsigned int which)
{
	struct ceph_osd_req_op *op;

	BUG_ON(which >= osd_req->r_num_ops);
	op = &osd_req->r_ops[which];

	switch (op->op) {
	case CEPH_OSD_OP_READ:
	case CEPH_OSD_OP_WRITE:
	case CEPH_OSD_OP_WRITEFULL:
		ceph_osd_data_release(&op->extent.osd_data);
		break;
	case CEPH_OSD_OP_CALL:
		ceph_osd_data_release(&op->cls.request_info);
		ceph_osd_data_release(&op->cls.request_data);
		ceph_osd_data_release(&op->cls.response_data);
		break;
	case CEPH_OSD_OP_SETXATTR:
	case CEPH_OSD_OP_CMPXATTR:
		ceph_osd_data_release(&op->xattr.osd_data);
		break;
	case CEPH_OSD_OP_STAT:
		ceph_osd_data_release(&op->raw_data_in);
		break;
	case CEPH_OSD_OP_NOTIFY_ACK:
		ceph_osd_data_release(&op->notify_ack.request_data);
		break;
	case CEPH_OSD_OP_NOTIFY:
		ceph_osd_data_release(&op->notify.request_data);
		ceph_osd_data_release(&op->notify.response_data);
		break;
	case CEPH_OSD_OP_LIST_WATCHERS:
		ceph_osd_data_release(&op->list_watchers.response_data);
		break;
	default:
		break;
	}
}
/*
 * Assumes @t is zero-initialized.
 */
static void target_init(struct ceph_osd_request_target *t)
{
	ceph_oid_init(&t->base_oid);
	ceph_oloc_init(&t->base_oloc);
	ceph_oid_init(&t->target_oid);
	ceph_oloc_init(&t->target_oloc);

	ceph_osds_init(&t->acting);
	ceph_osds_init(&t->up);
	t->size = -1;
	t->min_size = -1;

	t->osd = CEPH_HOMELESS_OSD;
}

static void target_copy(struct ceph_osd_request_target *dest,
			const struct ceph_osd_request_target *src)
{
	ceph_oid_copy(&dest->base_oid, &src->base_oid);
	ceph_oloc_copy(&dest->base_oloc, &src->base_oloc);
	ceph_oid_copy(&dest->target_oid, &src->target_oid);
	ceph_oloc_copy(&dest->target_oloc, &src->target_oloc);

	dest->pgid = src->pgid; /* struct */
	dest->spgid = src->spgid; /* struct */
	dest->pg_num = src->pg_num;
	dest->pg_num_mask = src->pg_num_mask;
	ceph_osds_copy(&dest->acting, &src->acting);
	ceph_osds_copy(&dest->up, &src->up);
	dest->size = src->size;
	dest->min_size = src->min_size;
	dest->sort_bitwise = src->sort_bitwise;
	dest->recovery_deletes = src->recovery_deletes;

	dest->flags = src->flags;
	dest->paused = src->paused;

	dest->epoch = src->epoch;
	dest->last_force_resend = src->last_force_resend;

	dest->osd = src->osd;
}

static void target_destroy(struct ceph_osd_request_target *t)
{
	ceph_oid_destroy(&t->base_oid);
	ceph_oloc_destroy(&t->base_oloc);
	ceph_oid_destroy(&t->target_oid);
	ceph_oloc_destroy(&t->target_oloc);
}
/*
 * requests
 */
static void request_release_checks(struct ceph_osd_request *req)
{
	WARN_ON(!RB_EMPTY_NODE(&req->r_node));
	WARN_ON(!RB_EMPTY_NODE(&req->r_mc_node));
	WARN_ON(!list_empty(&req->r_unsafe_item));
	WARN_ON(req->r_osd);
}

static void ceph_osdc_release_request(struct kref *kref)
{
	struct ceph_osd_request *req = container_of(kref,
					    struct ceph_osd_request, r_kref);
	unsigned int which;

	dout("%s %p (r_request %p r_reply %p)\n", __func__, req,
	     req->r_request, req->r_reply);
	request_release_checks(req);

	if (req->r_request)
		ceph_msg_put(req->r_request);
	if (req->r_reply)
		ceph_msg_put(req->r_reply);

	for (which = 0; which < req->r_num_ops; which++)
		osd_req_op_data_release(req, which);

	target_destroy(&req->r_t);
	ceph_put_snap_context(req->r_snapc);

	if (req->r_mempool)
		mempool_free(req, req->r_osdc->req_mempool);
	else if (req->r_num_ops <= CEPH_OSD_SLAB_OPS)
		kmem_cache_free(ceph_osd_request_cache, req);
	else
		kfree(req);
}

void ceph_osdc_get_request(struct ceph_osd_request *req)
{
	dout("%s %p (was %d)\n", __func__, req,
	     kref_read(&req->r_kref));
	kref_get(&req->r_kref);
}
EXPORT_SYMBOL(ceph_osdc_get_request);

void ceph_osdc_put_request(struct ceph_osd_request *req)
{
	if (req) {
		dout("%s %p (was %d)\n", __func__, req,
		     kref_read(&req->r_kref));
		kref_put(&req->r_kref, ceph_osdc_release_request);
	}
}
EXPORT_SYMBOL(ceph_osdc_put_request);

static void request_init(struct ceph_osd_request *req)
{
	/* req only, each op is zeroed in _osd_req_op_init() */
	memset(req, 0, sizeof(*req));

	kref_init(&req->r_kref);
	init_completion(&req->r_completion);
	RB_CLEAR_NODE(&req->r_node);
	RB_CLEAR_NODE(&req->r_mc_node);
	INIT_LIST_HEAD(&req->r_unsafe_item);

	target_init(&req->r_t);
}
/*
 * This is ugly, but it allows us to reuse linger registration and ping
 * requests, keeping the structure of the code around send_linger{_ping}()
 * reasonable.  Setting up a min_nr=2 mempool for each linger request
 * and dealing with copying ops (this blasts req only, watch op remains
 * intact) isn't any better.
 */
static void request_reinit(struct ceph_osd_request *req)
{
	struct ceph_osd_client *osdc = req->r_osdc;
	bool mempool = req->r_mempool;
	unsigned int num_ops = req->r_num_ops;
	u64 snapid = req->r_snapid;
	struct ceph_snap_context *snapc = req->r_snapc;
	bool linger = req->r_linger;
	struct ceph_msg *request_msg = req->r_request;
	struct ceph_msg *reply_msg = req->r_reply;

	dout("%s req %p\n", __func__, req);
	WARN_ON(kref_read(&req->r_kref) != 1);
	request_release_checks(req);

	WARN_ON(kref_read(&request_msg->kref) != 1);
	WARN_ON(kref_read(&reply_msg->kref) != 1);
	target_destroy(&req->r_t);

	request_init(req);
	req->r_osdc = osdc;
	req->r_mempool = mempool;
	req->r_num_ops = num_ops;
	req->r_snapid = snapid;
	req->r_snapc = snapc;
	req->r_linger = linger;
	req->r_request = request_msg;
	req->r_reply = reply_msg;
}
struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
					       struct ceph_snap_context *snapc,
					       unsigned int num_ops,
					       bool use_mempool,
					       gfp_t gfp_flags)
{
	struct ceph_osd_request *req;

	if (use_mempool) {
		BUG_ON(num_ops > CEPH_OSD_SLAB_OPS);
		req = mempool_alloc(osdc->req_mempool, gfp_flags);
	} else if (num_ops <= CEPH_OSD_SLAB_OPS) {
		req = kmem_cache_alloc(ceph_osd_request_cache, gfp_flags);
	} else {
		BUG_ON(num_ops > CEPH_OSD_MAX_OPS);
		req = kmalloc(sizeof(*req) + num_ops * sizeof(req->r_ops[0]),
			      gfp_flags);
	}
	if (unlikely(!req))
		return NULL;

	request_init(req);
	req->r_osdc = osdc;
	req->r_mempool = use_mempool;
	req->r_num_ops = num_ops;
	req->r_snapid = CEPH_NOSNAP;
	req->r_snapc = ceph_get_snap_context(snapc);

	dout("%s req %p\n", __func__, req);
	return req;
}
EXPORT_SYMBOL(ceph_osdc_alloc_request);

static int ceph_oloc_encoding_size(const struct ceph_object_locator *oloc)
{
	return 8 + 4 + 4 + 4 + (oloc->pool_ns ? oloc->pool_ns->len : 0);
}
int ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp)
{
	struct ceph_osd_client *osdc = req->r_osdc;
	struct ceph_msg *msg;
	int msg_size;

	WARN_ON(ceph_oid_empty(&req->r_base_oid));
	WARN_ON(ceph_oloc_empty(&req->r_base_oloc));

	/* create request message */
	msg_size = CEPH_ENCODING_START_BLK_LEN +
			CEPH_PGID_ENCODING_LEN + 1; /* spgid */
	msg_size += 4 + 4 + 4; /* hash, osdmap_epoch, flags */
	msg_size += CEPH_ENCODING_START_BLK_LEN +
			sizeof(struct ceph_osd_reqid); /* reqid */
	msg_size += sizeof(struct ceph_blkin_trace_info); /* trace */
	msg_size += 4 + sizeof(struct ceph_timespec); /* client_inc, mtime */
	msg_size += CEPH_ENCODING_START_BLK_LEN +
			ceph_oloc_encoding_size(&req->r_base_oloc); /* oloc */
	msg_size += 4 + req->r_base_oid.name_len; /* oid */
	msg_size += 2 + req->r_num_ops * sizeof(struct ceph_osd_op);
	msg_size += 8; /* snapid */
	msg_size += 8; /* snap_seq */
	msg_size += 4 + 8 * (req->r_snapc ? req->r_snapc->num_snaps : 0);
	msg_size += 4 + 8; /* retry_attempt, features */

	if (req->r_mempool)
		msg = ceph_msgpool_get(&osdc->msgpool_op, 0);
	else
		msg = ceph_msg_new(CEPH_MSG_OSD_OP, msg_size, gfp, true);
	if (!msg)
		return -ENOMEM;

	memset(msg->front.iov_base, 0, msg->front.iov_len);
	req->r_request = msg;

	/* create reply message */
	msg_size = OSD_OPREPLY_FRONT_LEN;
	msg_size += req->r_base_oid.name_len;
	msg_size += req->r_num_ops * sizeof(struct ceph_osd_op);

	if (req->r_mempool)
		msg = ceph_msgpool_get(&osdc->msgpool_op_reply, 0);
	else
		msg = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, msg_size, gfp, true);
	if (!msg)
		return -ENOMEM;

	req->r_reply = msg;

	return 0;
}
EXPORT_SYMBOL(ceph_osdc_alloc_messages);
static bool osd_req_opcode_valid(u16 opcode)
{
	switch (opcode) {
#define GENERATE_CASE(op, opcode, str)	case CEPH_OSD_OP_##op: return true;
__CEPH_FORALL_OSD_OPS(GENERATE_CASE)
#undef GENERATE_CASE
	default:
		return false;
	}
}

/*
 * This is an osd op init function for opcodes that have no data or
 * other information associated with them.  It also serves as a
 * common init routine for all the other init functions, below.
 */
static struct ceph_osd_req_op *
_osd_req_op_init(struct ceph_osd_request *osd_req, unsigned int which,
		 u16 opcode, u32 flags)
{
	struct ceph_osd_req_op *op;

	BUG_ON(which >= osd_req->r_num_ops);
	BUG_ON(!osd_req_opcode_valid(opcode));

	op = &osd_req->r_ops[which];
	memset(op, 0, sizeof (*op));
	op->op = opcode;
	op->flags = flags;

	return op;
}

void osd_req_op_init(struct ceph_osd_request *osd_req,
		     unsigned int which, u16 opcode, u32 flags)
{
	(void)_osd_req_op_init(osd_req, which, opcode, flags);
}
EXPORT_SYMBOL(osd_req_op_init);
void osd_req_op_extent_init(struct ceph_osd_request *osd_req,
			    unsigned int which, u16 opcode,
			    u64 offset, u64 length,
			    u64 truncate_size, u32 truncate_seq)
{
	struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
						      opcode, 0);
	size_t payload_len = 0;

	BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
	       opcode != CEPH_OSD_OP_WRITEFULL && opcode != CEPH_OSD_OP_ZERO &&
	       opcode != CEPH_OSD_OP_TRUNCATE);

	op->extent.offset = offset;
	op->extent.length = length;
	op->extent.truncate_size = truncate_size;
	op->extent.truncate_seq = truncate_seq;
	if (opcode == CEPH_OSD_OP_WRITE || opcode == CEPH_OSD_OP_WRITEFULL)
		payload_len += length;

	op->indata_len = payload_len;
}
EXPORT_SYMBOL(osd_req_op_extent_init);

void osd_req_op_extent_update(struct ceph_osd_request *osd_req,
			      unsigned int which, u64 length)
{
	struct ceph_osd_req_op *op;
	u64 previous;

	BUG_ON(which >= osd_req->r_num_ops);
	op = &osd_req->r_ops[which];
	previous = op->extent.length;

	if (length == previous)
		return;		/* Nothing to do */
	BUG_ON(length > previous);

	op->extent.length = length;
	if (op->op == CEPH_OSD_OP_WRITE || op->op == CEPH_OSD_OP_WRITEFULL)
		op->indata_len -= previous - length;
}
EXPORT_SYMBOL(osd_req_op_extent_update);
void osd_req_op_extent_dup_last(struct ceph_osd_request *osd_req,
				unsigned int which, u64 offset_inc)
{
	struct ceph_osd_req_op *op, *prev_op;

	BUG_ON(which + 1 >= osd_req->r_num_ops);

	prev_op = &osd_req->r_ops[which];
	op = _osd_req_op_init(osd_req, which + 1, prev_op->op, prev_op->flags);
	/* dup previous one */
	op->indata_len = prev_op->indata_len;
	op->outdata_len = prev_op->outdata_len;
	op->extent = prev_op->extent;
	/* adjust offset */
	op->extent.offset += offset_inc;
	op->extent.length -= offset_inc;

	if (op->op == CEPH_OSD_OP_WRITE || op->op == CEPH_OSD_OP_WRITEFULL)
		op->indata_len -= offset_inc;
}
EXPORT_SYMBOL(osd_req_op_extent_dup_last);
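/*
 * Worked example (illustrative): if op @which is a 6M write with
 * extent.offset 0 and offset_inc is 4M, the duplicated op @which + 1
 * becomes a 2M write at offset 4M and its indata_len drops by 4M -- i.e.
 * it covers exactly the tail of the original extent.
 */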
void osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which,
			 u16 opcode, const char *class, const char *method)
{
	struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
						      opcode, 0);
	struct ceph_pagelist *pagelist;
	size_t payload_len = 0;
	size_t size;

	BUG_ON(opcode != CEPH_OSD_OP_CALL);

	pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
	BUG_ON(!pagelist);
	ceph_pagelist_init(pagelist);

	op->cls.class_name = class;
	size = strlen(class);
	BUG_ON(size > (size_t) U8_MAX);
	op->cls.class_len = size;
	ceph_pagelist_append(pagelist, class, size);
	payload_len += size;

	op->cls.method_name = method;
	size = strlen(method);
	BUG_ON(size > (size_t) U8_MAX);
	op->cls.method_len = size;
	ceph_pagelist_append(pagelist, method, size);
	payload_len += size;

	osd_req_op_cls_request_info_pagelist(osd_req, which, pagelist);

	op->indata_len = payload_len;
}
EXPORT_SYMBOL(osd_req_op_cls_init);

int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int which,
			  u16 opcode, const char *name, const void *value,
			  size_t size, u8 cmp_op, u8 cmp_mode)
{
	struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
						      opcode, 0);
	struct ceph_pagelist *pagelist;
	size_t payload_len;

	BUG_ON(opcode != CEPH_OSD_OP_SETXATTR && opcode != CEPH_OSD_OP_CMPXATTR);

	pagelist = kmalloc(sizeof(*pagelist), GFP_NOFS);
	if (!pagelist)
		return -ENOMEM;

	ceph_pagelist_init(pagelist);

	payload_len = strlen(name);
	op->xattr.name_len = payload_len;
	ceph_pagelist_append(pagelist, name, payload_len);

	op->xattr.value_len = size;
	ceph_pagelist_append(pagelist, value, size);
	payload_len += size;

	op->xattr.cmp_op = cmp_op;
	op->xattr.cmp_mode = cmp_mode;

	ceph_osd_data_pagelist_init(&op->xattr.osd_data, pagelist);
	op->indata_len = payload_len;
	return 0;
}
EXPORT_SYMBOL(osd_req_op_xattr_init);
/*
 * @watch_opcode: CEPH_OSD_WATCH_OP_*
 */
static void osd_req_op_watch_init(struct ceph_osd_request *req, int which,
				  u64 cookie, u8 watch_opcode)
{
	struct ceph_osd_req_op *op;

	op = _osd_req_op_init(req, which, CEPH_OSD_OP_WATCH, 0);
	op->watch.cookie = cookie;
	op->watch.op = watch_opcode;
	op->watch.gen = 0;
}

void osd_req_op_alloc_hint_init(struct ceph_osd_request *osd_req,
				unsigned int which,
				u64 expected_object_size,
				u64 expected_write_size)
{
	struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
						      CEPH_OSD_OP_SETALLOCHINT,
						      0);

	op->alloc_hint.expected_object_size = expected_object_size;
	op->alloc_hint.expected_write_size = expected_write_size;

	/*
	 * CEPH_OSD_OP_SETALLOCHINT op is advisory and therefore deemed
	 * not worth a feature bit.  Set FAILOK per-op flag to make
	 * sure older osds don't trip over an unsupported opcode.
	 */
	op->flags |= CEPH_OSD_OP_FLAG_FAILOK;
}
EXPORT_SYMBOL(osd_req_op_alloc_hint_init);
static void ceph_osdc_msg_data_add(struct ceph_msg *msg,
				   struct ceph_osd_data *osd_data)
{
	u64 length = ceph_osd_data_length(osd_data);

	if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) {
		BUG_ON(length > (u64) SIZE_MAX);
		if (length)
			ceph_msg_data_add_pages(msg, osd_data->pages,
					length, osd_data->alignment);
	} else if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGELIST) {
		BUG_ON(!length);
		ceph_msg_data_add_pagelist(msg, osd_data->pagelist);
#ifdef CONFIG_BLOCK
	} else if (osd_data->type == CEPH_OSD_DATA_TYPE_BIO) {
		ceph_msg_data_add_bio(msg, &osd_data->bio_pos, length);
#endif
	} else if (osd_data->type == CEPH_OSD_DATA_TYPE_BVECS) {
		ceph_msg_data_add_bvecs(msg, &osd_data->bvec_pos);
	} else {
		BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_NONE);
	}
}
static u32 osd_req_encode_op(struct ceph_osd_op *dst,
			     const struct ceph_osd_req_op *src)
{
	if (WARN_ON(!osd_req_opcode_valid(src->op))) {
		pr_err("unrecognized osd opcode %d\n", src->op);

		return 0;
	}

	switch (src->op) {
	case CEPH_OSD_OP_STAT:
		break;
	case CEPH_OSD_OP_READ:
	case CEPH_OSD_OP_WRITE:
	case CEPH_OSD_OP_WRITEFULL:
	case CEPH_OSD_OP_ZERO:
	case CEPH_OSD_OP_TRUNCATE:
		dst->extent.offset = cpu_to_le64(src->extent.offset);
		dst->extent.length = cpu_to_le64(src->extent.length);
		dst->extent.truncate_size =
			cpu_to_le64(src->extent.truncate_size);
		dst->extent.truncate_seq =
			cpu_to_le32(src->extent.truncate_seq);
		break;
	case CEPH_OSD_OP_CALL:
		dst->cls.class_len = src->cls.class_len;
		dst->cls.method_len = src->cls.method_len;
		dst->cls.indata_len = cpu_to_le32(src->cls.indata_len);
		break;
	case CEPH_OSD_OP_WATCH:
		dst->watch.cookie = cpu_to_le64(src->watch.cookie);
		dst->watch.ver = cpu_to_le64(0);
		dst->watch.op = src->watch.op;
		dst->watch.gen = cpu_to_le32(src->watch.gen);
		break;
	case CEPH_OSD_OP_NOTIFY_ACK:
		break;
	case CEPH_OSD_OP_NOTIFY:
		dst->notify.cookie = cpu_to_le64(src->notify.cookie);
		break;
	case CEPH_OSD_OP_LIST_WATCHERS:
		break;
	case CEPH_OSD_OP_SETALLOCHINT:
		dst->alloc_hint.expected_object_size =
			cpu_to_le64(src->alloc_hint.expected_object_size);
		dst->alloc_hint.expected_write_size =
			cpu_to_le64(src->alloc_hint.expected_write_size);
		break;
	case CEPH_OSD_OP_SETXATTR:
	case CEPH_OSD_OP_CMPXATTR:
		dst->xattr.name_len = cpu_to_le32(src->xattr.name_len);
		dst->xattr.value_len = cpu_to_le32(src->xattr.value_len);
		dst->xattr.cmp_op = src->xattr.cmp_op;
		dst->xattr.cmp_mode = src->xattr.cmp_mode;
		break;
	case CEPH_OSD_OP_CREATE:
	case CEPH_OSD_OP_DELETE:
		break;
	default:
		pr_err("unsupported osd opcode %s\n",
			ceph_osd_op_name(src->op));
		WARN_ON(1);

		return 0;
	}

	dst->op = cpu_to_le16(src->op);
	dst->flags = cpu_to_le32(src->flags);
	dst->payload_len = cpu_to_le32(src->indata_len);

	return src->indata_len;
}
/*
 * build new request AND message, calculate layout, and adjust file
 * extent.
 *
 * if the file was recently truncated, we include information about its
 * old and new size so that the object can be updated appropriately.  (we
 * avoid synchronously deleting truncated objects because it's slow.)
 */
struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
					       struct ceph_file_layout *layout,
					       struct ceph_vino vino,
					       u64 off, u64 *plen,
					       unsigned int which, int num_ops,
					       int opcode, int flags,
					       struct ceph_snap_context *snapc,
					       u32 truncate_seq,
					       u64 truncate_size,
					       bool use_mempool)
{
	struct ceph_osd_request *req;
	u64 objnum = 0;
	u64 objoff = 0;
	u64 objlen = 0;
	int r;

	BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
	       opcode != CEPH_OSD_OP_ZERO && opcode != CEPH_OSD_OP_TRUNCATE &&
	       opcode != CEPH_OSD_OP_CREATE && opcode != CEPH_OSD_OP_DELETE);

	req = ceph_osdc_alloc_request(osdc, snapc, num_ops, use_mempool,
				      GFP_NOFS);
	if (!req) {
		r = -ENOMEM;
		goto fail;
	}

	/* calculate max write size */
	r = calc_layout(layout, off, plen, &objnum, &objoff, &objlen);
	if (r)
		goto fail;

	if (opcode == CEPH_OSD_OP_CREATE || opcode == CEPH_OSD_OP_DELETE) {
		osd_req_op_init(req, which, opcode, 0);
	} else {
		u32 object_size = layout->object_size;
		u32 object_base = off - objoff;
		if (!(truncate_seq == 1 && truncate_size == -1ULL)) {
			if (truncate_size <= object_base) {
				truncate_size = 0;
			} else {
				truncate_size -= object_base;
				if (truncate_size > object_size)
					truncate_size = object_size;
			}
		}
		osd_req_op_extent_init(req, which, opcode, objoff, objlen,
				       truncate_size, truncate_seq);
	}

	req->r_abort_on_full = true;
	req->r_flags = flags;
	req->r_base_oloc.pool = layout->pool_id;
	req->r_base_oloc.pool_ns = ceph_try_get_string(layout->pool_ns);
	ceph_oid_printf(&req->r_base_oid, "%llx.%08llx", vino.ino, objnum);

	req->r_snapid = vino.snap;
	if (flags & CEPH_OSD_FLAG_WRITE)
		req->r_data_offset = off;

	r = ceph_osdc_alloc_messages(req, GFP_NOFS);
	if (r)
		goto fail;

	return req;

fail:
	ceph_osdc_put_request(req);
	return ERR_PTR(r);
}
EXPORT_SYMBOL(ceph_osdc_new_request);
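/*
 * Illustrative sketch (hypothetical caller, not part of this file): a
 * typical read path would look roughly like
 *
 *	req = ceph_osdc_new_request(osdc, layout, vino, off, &len, 0, 1,
 *				    CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
 *				    NULL, 0, 0, false);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *	osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, false, false);
 *
 * followed by ceph_osdc_start_request() and ceph_osdc_wait_request().
 */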
/*
 * We keep osd requests in an rbtree, sorted by ->r_tid.
 */
DEFINE_RB_FUNCS(request, struct ceph_osd_request, r_tid, r_node)
DEFINE_RB_FUNCS(request_mc, struct ceph_osd_request, r_tid, r_mc_node)
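/*
 * Note: DEFINE_RB_FUNCS(request, ...) expands to insert_request(),
 * erase_request() and lookup_request() keyed by ->r_tid; the _mc variants
 * back the osdc->map_checks tree used for pool-existence checks.
 */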
static bool osd_homeless(struct ceph_osd *osd)
{
	return osd->o_osd == CEPH_HOMELESS_OSD;
}

static bool osd_registered(struct ceph_osd *osd)
{
	verify_osdc_locked(osd->o_osdc);

	return !RB_EMPTY_NODE(&osd->o_node);
}

/*
 * Assumes @osd is zero-initialized.
 */
static void osd_init(struct ceph_osd *osd)
{
	refcount_set(&osd->o_ref, 1);
	RB_CLEAR_NODE(&osd->o_node);
	osd->o_requests = RB_ROOT;
	osd->o_linger_requests = RB_ROOT;
	osd->o_backoff_mappings = RB_ROOT;
	osd->o_backoffs_by_id = RB_ROOT;
	INIT_LIST_HEAD(&osd->o_osd_lru);
	INIT_LIST_HEAD(&osd->o_keepalive_item);
	osd->o_incarnation = 1;
	mutex_init(&osd->lock);
}

static void osd_cleanup(struct ceph_osd *osd)
{
	WARN_ON(!RB_EMPTY_NODE(&osd->o_node));
	WARN_ON(!RB_EMPTY_ROOT(&osd->o_requests));
	WARN_ON(!RB_EMPTY_ROOT(&osd->o_linger_requests));
	WARN_ON(!RB_EMPTY_ROOT(&osd->o_backoff_mappings));
	WARN_ON(!RB_EMPTY_ROOT(&osd->o_backoffs_by_id));
	WARN_ON(!list_empty(&osd->o_osd_lru));
	WARN_ON(!list_empty(&osd->o_keepalive_item));

	if (osd->o_auth.authorizer) {
		WARN_ON(osd_homeless(osd));
		ceph_auth_destroy_authorizer(osd->o_auth.authorizer);
	}
}
/*
 * Track open sessions with osds.
 */
static struct ceph_osd *create_osd(struct ceph_osd_client *osdc, int onum)
{
	struct ceph_osd *osd;

	WARN_ON(onum == CEPH_HOMELESS_OSD);

	osd = kzalloc(sizeof(*osd), GFP_NOIO | __GFP_NOFAIL);
	osd_init(osd);
	osd->o_osdc = osdc;
	osd->o_osd = onum;

	ceph_con_init(&osd->o_con, osd, &osd_con_ops, &osdc->client->msgr);

	return osd;
}

static struct ceph_osd *get_osd(struct ceph_osd *osd)
{
	if (refcount_inc_not_zero(&osd->o_ref)) {
		dout("get_osd %p %d -> %d\n", osd, refcount_read(&osd->o_ref)-1,
		     refcount_read(&osd->o_ref));
		return osd;
	} else {
		dout("get_osd %p FAIL\n", osd);
		return NULL;
	}
}

static void put_osd(struct ceph_osd *osd)
{
	dout("put_osd %p %d -> %d\n", osd, refcount_read(&osd->o_ref),
	     refcount_read(&osd->o_ref) - 1);
	if (refcount_dec_and_test(&osd->o_ref)) {
		osd_cleanup(osd);
		kfree(osd);
	}
}

DEFINE_RB_FUNCS(osd, struct ceph_osd, o_osd, o_node)

static void __move_osd_to_lru(struct ceph_osd *osd)
{
	struct ceph_osd_client *osdc = osd->o_osdc;

	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
	BUG_ON(!list_empty(&osd->o_osd_lru));

	spin_lock(&osdc->osd_lru_lock);
	list_add_tail(&osd->o_osd_lru, &osdc->osd_lru);
	spin_unlock(&osdc->osd_lru_lock);

	osd->lru_ttl = jiffies + osdc->client->options->osd_idle_ttl;
}

static void maybe_move_osd_to_lru(struct ceph_osd *osd)
{
	if (RB_EMPTY_ROOT(&osd->o_requests) &&
	    RB_EMPTY_ROOT(&osd->o_linger_requests))
		__move_osd_to_lru(osd);
}

static void __remove_osd_from_lru(struct ceph_osd *osd)
{
	struct ceph_osd_client *osdc = osd->o_osdc;

	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);

	spin_lock(&osdc->osd_lru_lock);
	if (!list_empty(&osd->o_osd_lru))
		list_del_init(&osd->o_osd_lru);
	spin_unlock(&osdc->osd_lru_lock);
}
/*
 * Close the connection and assign any leftover requests to the
 * homeless session.
 */
static void close_osd(struct ceph_osd *osd)
{
	struct ceph_osd_client *osdc = osd->o_osdc;
	struct rb_node *n;

	verify_osdc_wrlocked(osdc);
	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);

	ceph_con_close(&osd->o_con);

	for (n = rb_first(&osd->o_requests); n; ) {
		struct ceph_osd_request *req =
		    rb_entry(n, struct ceph_osd_request, r_node);

		n = rb_next(n); /* unlink_request() */

		dout(" reassigning req %p tid %llu\n", req, req->r_tid);
		unlink_request(osd, req);
		link_request(&osdc->homeless_osd, req);
	}
	for (n = rb_first(&osd->o_linger_requests); n; ) {
		struct ceph_osd_linger_request *lreq =
		    rb_entry(n, struct ceph_osd_linger_request, node);

		n = rb_next(n); /* unlink_linger() */

		dout(" reassigning lreq %p linger_id %llu\n", lreq,
		     lreq->linger_id);
		unlink_linger(osd, lreq);
		link_linger(&osdc->homeless_osd, lreq);
	}
	clear_backoffs(osd);

	__remove_osd_from_lru(osd);
	erase_osd(&osdc->osds, osd);
	put_osd(osd);
}

/*
 * reset osd connect
 */
static int reopen_osd(struct ceph_osd *osd)
{
	struct ceph_entity_addr *peer_addr;

	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);

	if (RB_EMPTY_ROOT(&osd->o_requests) &&
	    RB_EMPTY_ROOT(&osd->o_linger_requests)) {
		close_osd(osd);
		return -ENODEV;
	}

	peer_addr = &osd->o_osdc->osdmap->osd_addr[osd->o_osd];
	if (!memcmp(peer_addr, &osd->o_con.peer_addr, sizeof (*peer_addr)) &&
	    !ceph_con_opened(&osd->o_con)) {
		struct rb_node *n;

		dout("osd addr hasn't changed and connection never opened, "
		     "letting msgr retry\n");
		/* touch each r_stamp for handle_timeout()'s benefit */
		for (n = rb_first(&osd->o_requests); n; n = rb_next(n)) {
			struct ceph_osd_request *req =
			    rb_entry(n, struct ceph_osd_request, r_node);
			req->r_stamp = jiffies;
		}

		return -EAGAIN;
	}

	ceph_con_close(&osd->o_con);
	ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd, peer_addr);
	osd->o_incarnation++;

	return 0;
}
static struct ceph_osd *lookup_create_osd(struct ceph_osd_client *osdc, int o,
					  bool wrlocked)
{
	struct ceph_osd *osd;

	if (wrlocked)
		verify_osdc_wrlocked(osdc);
	else
		verify_osdc_locked(osdc);

	if (o != CEPH_HOMELESS_OSD)
		osd = lookup_osd(&osdc->osds, o);
	else
		osd = &osdc->homeless_osd;
	if (!osd) {
		if (!wrlocked)
			return ERR_PTR(-EAGAIN);

		osd = create_osd(osdc, o);
		insert_osd(&osdc->osds, osd);
		ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd,
			      &osdc->osdmap->osd_addr[osd->o_osd]);
	}

	dout("%s osdc %p osd%d -> osd %p\n", __func__, osdc, o, osd);
	return osd;
}

/*
 * Create request <-> OSD session relation.
 *
 * @req has to be assigned a tid, @osd may be homeless.
 */
static void link_request(struct ceph_osd *osd, struct ceph_osd_request *req)
{
	verify_osd_locked(osd);
	WARN_ON(!req->r_tid || req->r_osd);
	dout("%s osd %p osd%d req %p tid %llu\n", __func__, osd, osd->o_osd,
	     req, req->r_tid);

	if (!osd_homeless(osd))
		__remove_osd_from_lru(osd);
	else
		atomic_inc(&osd->o_osdc->num_homeless);

	get_osd(osd);
	insert_request(&osd->o_requests, req);
	req->r_osd = osd;
}

static void unlink_request(struct ceph_osd *osd, struct ceph_osd_request *req)
{
	verify_osd_locked(osd);
	WARN_ON(req->r_osd != osd);
	dout("%s osd %p osd%d req %p tid %llu\n", __func__, osd, osd->o_osd,
	     req, req->r_tid);

	req->r_osd = NULL;
	erase_request(&osd->o_requests, req);
	put_osd(osd);

	if (!osd_homeless(osd))
		maybe_move_osd_to_lru(osd);
	else
		atomic_dec(&osd->o_osdc->num_homeless);
}
static bool __pool_full(struct ceph_pg_pool_info *pi)
{
	return pi->flags & CEPH_POOL_FLAG_FULL;
}

static bool have_pool_full(struct ceph_osd_client *osdc)
{
	struct rb_node *n;

	for (n = rb_first(&osdc->osdmap->pg_pools); n; n = rb_next(n)) {
		struct ceph_pg_pool_info *pi =
		    rb_entry(n, struct ceph_pg_pool_info, node);

		if (__pool_full(pi))
			return true;
	}

	return false;
}

static bool pool_full(struct ceph_osd_client *osdc, s64 pool_id)
{
	struct ceph_pg_pool_info *pi;

	pi = ceph_pg_pool_by_id(osdc->osdmap, pool_id);
	if (!pi)
		return false;

	return __pool_full(pi);
}

/*
 * Returns whether a request should be blocked from being sent
 * based on the current osdmap and osd_client settings.
 */
static bool target_should_be_paused(struct ceph_osd_client *osdc,
				    const struct ceph_osd_request_target *t,
				    struct ceph_pg_pool_info *pi)
{
	bool pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
	bool pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
		       ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
		       __pool_full(pi);

	WARN_ON(pi->id != t->target_oloc.pool);
	return ((t->flags & CEPH_OSD_FLAG_READ) && pauserd) ||
	       ((t->flags & CEPH_OSD_FLAG_WRITE) && pausewr) ||
	       (osdc->osdmap->epoch < osdc->epoch_barrier);
}
enum calc_target_result {
	CALC_TARGET_NO_ACTION = 0,
	CALC_TARGET_NEED_RESEND,
	CALC_TARGET_POOL_DNE,
};

static enum calc_target_result calc_target(struct ceph_osd_client *osdc,
					   struct ceph_osd_request_target *t,
					   struct ceph_connection *con,
					   bool any_change)
{
	struct ceph_pg_pool_info *pi;
	struct ceph_pg pgid, last_pgid;
	struct ceph_osds up, acting;
	bool force_resend = false;
	bool unpaused = false;
	bool legacy_change;
	bool split = false;
	bool sort_bitwise = ceph_osdmap_flag(osdc, CEPH_OSDMAP_SORTBITWISE);
	bool recovery_deletes = ceph_osdmap_flag(osdc,
						 CEPH_OSDMAP_RECOVERY_DELETES);
	enum calc_target_result ct_res;
	int ret;

	t->epoch = osdc->osdmap->epoch;
	pi = ceph_pg_pool_by_id(osdc->osdmap, t->base_oloc.pool);
	if (!pi) {
		t->osd = CEPH_HOMELESS_OSD;
		ct_res = CALC_TARGET_POOL_DNE;
		goto out;
	}

	if (osdc->osdmap->epoch == pi->last_force_request_resend) {
		if (t->last_force_resend < pi->last_force_request_resend) {
			t->last_force_resend = pi->last_force_request_resend;
			force_resend = true;
		} else if (t->last_force_resend == 0) {
			force_resend = true;
		}
	}

	/* apply tiering */
	ceph_oid_copy(&t->target_oid, &t->base_oid);
	ceph_oloc_copy(&t->target_oloc, &t->base_oloc);
	if ((t->flags & CEPH_OSD_FLAG_IGNORE_OVERLAY) == 0) {
		if (t->flags & CEPH_OSD_FLAG_READ && pi->read_tier >= 0)
			t->target_oloc.pool = pi->read_tier;
		if (t->flags & CEPH_OSD_FLAG_WRITE && pi->write_tier >= 0)
			t->target_oloc.pool = pi->write_tier;

		pi = ceph_pg_pool_by_id(osdc->osdmap, t->target_oloc.pool);
		if (!pi) {
			t->osd = CEPH_HOMELESS_OSD;
			ct_res = CALC_TARGET_POOL_DNE;
			goto out;
		}
	}

	ret = __ceph_object_locator_to_pg(pi, &t->target_oid, &t->target_oloc,
					  &pgid);
	if (ret) {
		WARN_ON(ret != -ENOENT);
		t->osd = CEPH_HOMELESS_OSD;
		ct_res = CALC_TARGET_POOL_DNE;
		goto out;
	}
	last_pgid.pool = pgid.pool;
	last_pgid.seed = ceph_stable_mod(pgid.seed, t->pg_num, t->pg_num_mask);

	ceph_pg_to_up_acting_osds(osdc->osdmap, pi, &pgid, &up, &acting);
	if (any_change &&
	    ceph_is_new_interval(&t->acting,
				 &acting,
				 &t->up,
				 &up,
				 t->size,
				 pi->size,
				 t->min_size,
				 pi->min_size,
				 t->pg_num,
				 pi->pg_num,
				 t->sort_bitwise,
				 sort_bitwise,
				 t->recovery_deletes,
				 recovery_deletes,
				 &last_pgid))
		force_resend = true;

	if (t->paused && !target_should_be_paused(osdc, t, pi)) {
		t->paused = false;
		unpaused = true;
	}
	legacy_change = ceph_pg_compare(&t->pgid, &pgid) ||
			ceph_osds_changed(&t->acting, &acting, any_change);
	if (t->pg_num)
		split = ceph_pg_is_split(&last_pgid, t->pg_num, pi->pg_num);

	if (legacy_change || force_resend || split) {
		t->pgid = pgid; /* struct */
		ceph_pg_to_primary_shard(osdc->osdmap, pi, &pgid, &t->spgid);
		ceph_osds_copy(&t->acting, &acting);
		ceph_osds_copy(&t->up, &up);
		t->size = pi->size;
		t->min_size = pi->min_size;
		t->pg_num = pi->pg_num;
		t->pg_num_mask = pi->pg_num_mask;
		t->sort_bitwise = sort_bitwise;
		t->recovery_deletes = recovery_deletes;
		t->osd = acting.primary;
	}

	if (unpaused || legacy_change || force_resend ||
	    (split && con && CEPH_HAVE_FEATURE(con->peer_features,
					       RESEND_ON_SPLIT)))
		ct_res = CALC_TARGET_NEED_RESEND;
	else
		ct_res = CALC_TARGET_NO_ACTION;

out:
	dout("%s t %p -> ct_res %d osd %d\n", __func__, t, ct_res, t->osd);
	return ct_res;
}
static struct ceph_spg_mapping *alloc_spg_mapping(void)
{
	struct ceph_spg_mapping *spg;

	spg = kmalloc(sizeof(*spg), GFP_NOIO);
	if (!spg)
		return NULL;

	RB_CLEAR_NODE(&spg->node);
	spg->backoffs = RB_ROOT;
	return spg;
}

static void free_spg_mapping(struct ceph_spg_mapping *spg)
{
	WARN_ON(!RB_EMPTY_NODE(&spg->node));
	WARN_ON(!RB_EMPTY_ROOT(&spg->backoffs));

	kfree(spg);
}

/*
 * rbtree of ceph_spg_mapping for handling map<spg_t, ...>, similar to
 * ceph_pg_mapping.  Used to track OSD backoffs -- a backoff [range] is
 * defined only within a specific spgid; it does not pass anything to
 * children on split, or to another primary.
 */
DEFINE_RB_FUNCS2(spg_mapping, struct ceph_spg_mapping, spgid, ceph_spg_compare,
		 RB_BYPTR, const struct ceph_spg *, node)

static u64 hoid_get_bitwise_key(const struct ceph_hobject_id *hoid)
{
	return hoid->is_max ? 0x100000000ull : hoid->hash_reverse_bits;
}

static void hoid_get_effective_key(const struct ceph_hobject_id *hoid,
				   void **pkey, size_t *pkey_len)
{
	if (hoid->key_len) {
		*pkey = hoid->key;
		*pkey_len = hoid->key_len;
	} else {
		*pkey = hoid->oid;
		*pkey_len = hoid->oid_len;
	}
}
static int compare_names(const void *name1, size_t name1_len,
			 const void *name2, size_t name2_len)
{
	int ret;

	ret = memcmp(name1, name2, min(name1_len, name2_len));
	if (!ret) {
		if (name1_len < name2_len)
			ret = -1;
		else if (name1_len > name2_len)
			ret = 1;
	}
	return ret;
}

static int hoid_compare(const struct ceph_hobject_id *lhs,
			const struct ceph_hobject_id *rhs)
{
	void *effective_key1, *effective_key2;
	size_t effective_key1_len, effective_key2_len;
	int ret;

	if (lhs->is_max < rhs->is_max)
		return -1;
	if (lhs->is_max > rhs->is_max)
		return 1;

	if (lhs->pool < rhs->pool)
		return -1;
	if (lhs->pool > rhs->pool)
		return 1;

	if (hoid_get_bitwise_key(lhs) < hoid_get_bitwise_key(rhs))
		return -1;
	if (hoid_get_bitwise_key(lhs) > hoid_get_bitwise_key(rhs))
		return 1;

	ret = compare_names(lhs->nspace, lhs->nspace_len,
			    rhs->nspace, rhs->nspace_len);
	if (ret)
		return ret;

	hoid_get_effective_key(lhs, &effective_key1, &effective_key1_len);
	hoid_get_effective_key(rhs, &effective_key2, &effective_key2_len);
	ret = compare_names(effective_key1, effective_key1_len,
			    effective_key2, effective_key2_len);
	if (ret)
		return ret;

	ret = compare_names(lhs->oid, lhs->oid_len, rhs->oid, rhs->oid_len);
	if (ret)
		return ret;

	if (lhs->snapid < rhs->snapid)
		return -1;
	if (lhs->snapid > rhs->snapid)
		return 1;

	return 0;
}
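/*
 * Note: this reproduces Ceph's bitwise hobject_t ordering -- is_max
 * first, then pool, then the bit-reversed hash, then namespace, then the
 * effective key (key if present, oid otherwise), then oid, then snapid.
 * Two hoids therefore only fall back to name comparison once their
 * bitwise keys tie.
 */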
/*
 * For decoding ->begin and ->end of MOSDBackoff only -- no MIN/MAX
 * compat stuff here.
 *
 * Assumes @hoid is zero-initialized.
 */
static int decode_hoid(void **p, void *end, struct ceph_hobject_id *hoid)
{
	u8 struct_v;
	u32 struct_len;
	int ret;

	ret = ceph_start_decoding(p, end, 4, "hobject_t", &struct_v,
				  &struct_len);
	if (ret)
		return ret;

	if (struct_v < 4) {
		pr_err("got struct_v %d < 4 of hobject_t\n", struct_v);
		goto e_inval;
	}

	hoid->key = ceph_extract_encoded_string(p, end, &hoid->key_len,
						GFP_NOIO);
	if (IS_ERR(hoid->key)) {
		ret = PTR_ERR(hoid->key);
		hoid->key = NULL;
		return ret;
	}

	hoid->oid = ceph_extract_encoded_string(p, end, &hoid->oid_len,
						GFP_NOIO);
	if (IS_ERR(hoid->oid)) {
		ret = PTR_ERR(hoid->oid);
		hoid->oid = NULL;
		return ret;
	}

	ceph_decode_64_safe(p, end, hoid->snapid, e_inval);
	ceph_decode_32_safe(p, end, hoid->hash, e_inval);
	ceph_decode_8_safe(p, end, hoid->is_max, e_inval);

	hoid->nspace = ceph_extract_encoded_string(p, end, &hoid->nspace_len,
						   GFP_NOIO);
	if (IS_ERR(hoid->nspace)) {
		ret = PTR_ERR(hoid->nspace);
		hoid->nspace = NULL;
		return ret;
	}

	ceph_decode_64_safe(p, end, hoid->pool, e_inval);

	ceph_hoid_build_hash_cache(hoid);
	return 0;

e_inval:
	return -EINVAL;
}

static int hoid_encoding_size(const struct ceph_hobject_id *hoid)
{
	return 8 + 4 + 1 + 8 + /* snapid, hash, is_max, pool */
	       4 + hoid->key_len + 4 + hoid->oid_len + 4 + hoid->nspace_len;
}

static void encode_hoid(void **p, void *end, const struct ceph_hobject_id *hoid)
{
	ceph_start_encoding(p, 4, 3, hoid_encoding_size(hoid));
	ceph_encode_string(p, end, hoid->key, hoid->key_len);
	ceph_encode_string(p, end, hoid->oid, hoid->oid_len);
	ceph_encode_64(p, hoid->snapid);
	ceph_encode_32(p, hoid->hash);
	ceph_encode_8(p, hoid->is_max);
	ceph_encode_string(p, end, hoid->nspace, hoid->nspace_len);
	ceph_encode_64(p, hoid->pool);
}

static void free_hoid(struct ceph_hobject_id *hoid)
{
	if (hoid) {
		kfree(hoid->key);
		kfree(hoid->oid);
		kfree(hoid->nspace);
		kfree(hoid);
	}
}
static struct ceph_osd_backoff *alloc_backoff(void)
{
	struct ceph_osd_backoff *backoff;

	backoff = kzalloc(sizeof(*backoff), GFP_NOIO);
	if (!backoff)
		return NULL;

	RB_CLEAR_NODE(&backoff->spg_node);
	RB_CLEAR_NODE(&backoff->id_node);
	return backoff;
}

static void free_backoff(struct ceph_osd_backoff *backoff)
{
	WARN_ON(!RB_EMPTY_NODE(&backoff->spg_node));
	WARN_ON(!RB_EMPTY_NODE(&backoff->id_node));

	free_hoid(backoff->begin);
	free_hoid(backoff->end);
	kfree(backoff);
}

/*
 * Within a specific spgid, backoffs are managed by ->begin hoid.
 */
DEFINE_RB_INSDEL_FUNCS2(backoff, struct ceph_osd_backoff, begin, hoid_compare,
			RB_BYVAL, spg_node);

static struct ceph_osd_backoff *lookup_containing_backoff(struct rb_root *root,
					const struct ceph_hobject_id *hoid)
{
	struct rb_node *n = root->rb_node;

	while (n) {
		struct ceph_osd_backoff *cur =
		    rb_entry(n, struct ceph_osd_backoff, spg_node);
		int cmp;

		cmp = hoid_compare(hoid, cur->begin);
		if (cmp < 0) {
			n = n->rb_left;
		} else if (cmp > 0) {
			if (hoid_compare(hoid, cur->end) < 0)
				return cur;

			n = n->rb_right;
		} else {
			return cur;
		}
	}

	return NULL;
}

/*
 * Each backoff has a unique id within its OSD session.
 */
DEFINE_RB_FUNCS(backoff_by_id, struct ceph_osd_backoff, id, id_node)
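/*
 * Illustrative example: with a backoff spanning [begin, end) in
 * hoid_compare() order, lookup_containing_backoff() returns the node for
 * any hoid with begin <= hoid < end, descending left when hoid sorts
 * before ->begin and right (after a containment check) otherwise.
 */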
static void clear_backoffs(struct ceph_osd *osd)
{
	while (!RB_EMPTY_ROOT(&osd->o_backoff_mappings)) {
		struct ceph_spg_mapping *spg =
		    rb_entry(rb_first(&osd->o_backoff_mappings),
			     struct ceph_spg_mapping, node);

		while (!RB_EMPTY_ROOT(&spg->backoffs)) {
			struct ceph_osd_backoff *backoff =
			    rb_entry(rb_first(&spg->backoffs),
				     struct ceph_osd_backoff, spg_node);

			erase_backoff(&spg->backoffs, backoff);
			erase_backoff_by_id(&osd->o_backoffs_by_id, backoff);
			free_backoff(backoff);
		}
		erase_spg_mapping(&osd->o_backoff_mappings, spg);
		free_spg_mapping(spg);
	}
}

/*
 * Set up a temporary, non-owning view into @t.
 */
static void hoid_fill_from_target(struct ceph_hobject_id *hoid,
				  const struct ceph_osd_request_target *t)
{
	hoid->key = NULL;
	hoid->key_len = 0;
	hoid->oid = t->target_oid.name;
	hoid->oid_len = t->target_oid.name_len;
	hoid->snapid = CEPH_NOSNAP;
	hoid->hash = t->pgid.seed;
	hoid->is_max = false;
	if (t->target_oloc.pool_ns) {
		hoid->nspace = t->target_oloc.pool_ns->str;
		hoid->nspace_len = t->target_oloc.pool_ns->len;
	} else {
		hoid->nspace = NULL;
		hoid->nspace_len = 0;
	}
	hoid->pool = t->target_oloc.pool;
	ceph_hoid_build_hash_cache(hoid);
}

static bool should_plug_request(struct ceph_osd_request *req)
{
	struct ceph_osd *osd = req->r_osd;
	struct ceph_spg_mapping *spg;
	struct ceph_osd_backoff *backoff;
	struct ceph_hobject_id hoid;

	spg = lookup_spg_mapping(&osd->o_backoff_mappings, &req->r_t.spgid);
	if (!spg)
		return false;

	hoid_fill_from_target(&hoid, &req->r_t);
	backoff = lookup_containing_backoff(&spg->backoffs, &hoid);
	if (!backoff)
		return false;

	dout("%s req %p tid %llu backoff osd%d spgid %llu.%xs%d id %llu\n",
	     __func__, req, req->r_tid, osd->o_osd, backoff->spgid.pgid.pool,
	     backoff->spgid.pgid.seed, backoff->spgid.shard, backoff->id);
	return true;
}
static void setup_request_data(struct ceph_osd_request *req,
			       struct ceph_msg *msg)
{
	u32 data_len = 0;
	int i;

	if (!list_empty(&msg->data))
		return;

	WARN_ON(msg->data_length);
	for (i = 0; i < req->r_num_ops; i++) {
		struct ceph_osd_req_op *op = &req->r_ops[i];

		switch (op->op) {
		/* request */
		case CEPH_OSD_OP_WRITE:
		case CEPH_OSD_OP_WRITEFULL:
			WARN_ON(op->indata_len != op->extent.length);
			ceph_osdc_msg_data_add(msg, &op->extent.osd_data);
			break;
		case CEPH_OSD_OP_SETXATTR:
		case CEPH_OSD_OP_CMPXATTR:
			WARN_ON(op->indata_len != op->xattr.name_len +
						  op->xattr.value_len);
			ceph_osdc_msg_data_add(msg, &op->xattr.osd_data);
			break;
		case CEPH_OSD_OP_NOTIFY_ACK:
			ceph_osdc_msg_data_add(msg,
					       &op->notify_ack.request_data);
			break;

		/* reply */
		case CEPH_OSD_OP_STAT:
			ceph_osdc_msg_data_add(req->r_reply,
					       &op->raw_data_in);
			break;
		case CEPH_OSD_OP_READ:
			ceph_osdc_msg_data_add(req->r_reply,
					       &op->extent.osd_data);
			break;
		case CEPH_OSD_OP_LIST_WATCHERS:
			ceph_osdc_msg_data_add(req->r_reply,
					       &op->list_watchers.response_data);
			break;

		/* both */
		case CEPH_OSD_OP_CALL:
			WARN_ON(op->indata_len != op->cls.class_len +
						  op->cls.method_len +
						  op->cls.indata_len);
			ceph_osdc_msg_data_add(msg, &op->cls.request_info);
			/* optional, can be NONE */
			ceph_osdc_msg_data_add(msg, &op->cls.request_data);
			/* optional, can be NONE */
			ceph_osdc_msg_data_add(req->r_reply,
					       &op->cls.response_data);
			break;
		case CEPH_OSD_OP_NOTIFY:
			ceph_osdc_msg_data_add(msg,
					       &op->notify.request_data);
			ceph_osdc_msg_data_add(req->r_reply,
					       &op->notify.response_data);
			break;
		}

		data_len += op->indata_len;
	}

	WARN_ON(data_len != msg->data_length);
}
static void encode_pgid(void **p, const struct ceph_pg *pgid)
{
	ceph_encode_8(p, 1);
	ceph_encode_64(p, pgid->pool);
	ceph_encode_32(p, pgid->seed);
	ceph_encode_32(p, -1); /* preferred */
}

static void encode_spgid(void **p, const struct ceph_spg *spgid)
{
	ceph_start_encoding(p, 1, 1, CEPH_PGID_ENCODING_LEN + 1);
	encode_pgid(p, &spgid->pgid);
	ceph_encode_8(p, spgid->shard);
}

static void encode_oloc(void **p, void *end,
			const struct ceph_object_locator *oloc)
{
	ceph_start_encoding(p, 5, 4, ceph_oloc_encoding_size(oloc));
	ceph_encode_64(p, oloc->pool);
	ceph_encode_32(p, -1); /* preferred */
	ceph_encode_32(p, 0);  /* key len */
	if (oloc->pool_ns)
		ceph_encode_string(p, end, oloc->pool_ns->str,
				   oloc->pool_ns->len);
	else
		ceph_encode_32(p, 0);
}
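/*
 * Wire layout note (derived from the code above): encode_pgid() emits
 * 17 bytes -- a one-byte encoding version (1), the 64-bit pool, the
 * 32-bit seed and a legacy 32-bit "preferred" osd fixed at -1.
 * encode_spgid() wraps that in a start-of-encoding block and appends the
 * 8-bit shard, which is what CEPH_PGID_ENCODING_LEN + 1 accounts for in
 * ceph_osdc_alloc_messages().
 */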
static void encode_request_partial(struct ceph_osd_request *req,
				   struct ceph_msg *msg)
{
	void *p = msg->front.iov_base;
	void *const end = p + msg->front_alloc_len;
	u32 data_len = 0;
	int i;

	if (req->r_flags & CEPH_OSD_FLAG_WRITE) {
		/* snapshots aren't writeable */
		WARN_ON(req->r_snapid != CEPH_NOSNAP);
	} else {
		WARN_ON(req->r_mtime.tv_sec || req->r_mtime.tv_nsec ||
			req->r_data_offset || req->r_snapc);
	}

	setup_request_data(req, msg);

	encode_spgid(&p, &req->r_t.spgid); /* actual spg */
	ceph_encode_32(&p, req->r_t.pgid.seed); /* raw hash */
	ceph_encode_32(&p, req->r_osdc->osdmap->epoch);
	ceph_encode_32(&p, req->r_flags);

	/* reqid */
	ceph_start_encoding(&p, 2, 2, sizeof(struct ceph_osd_reqid));
	memset(p, 0, sizeof(struct ceph_osd_reqid));
	p += sizeof(struct ceph_osd_reqid);

	/* trace */
	memset(p, 0, sizeof(struct ceph_blkin_trace_info));
	p += sizeof(struct ceph_blkin_trace_info);

	ceph_encode_32(&p, 0); /* client_inc, always 0 */
	ceph_encode_timespec(p, &req->r_mtime);
	p += sizeof(struct ceph_timespec);

	encode_oloc(&p, end, &req->r_t.target_oloc);
	ceph_encode_string(&p, end, req->r_t.target_oid.name,
			   req->r_t.target_oid.name_len);

	/* ops, can imply data */
	ceph_encode_16(&p, req->r_num_ops);
	for (i = 0; i < req->r_num_ops; i++) {
		data_len += osd_req_encode_op(p, &req->r_ops[i]);
		p += sizeof(struct ceph_osd_op);
	}

	ceph_encode_64(&p, req->r_snapid); /* snapid */
	if (req->r_snapc) {
		ceph_encode_64(&p, req->r_snapc->seq);
		ceph_encode_32(&p, req->r_snapc->num_snaps);
		for (i = 0; i < req->r_snapc->num_snaps; i++)
			ceph_encode_64(&p, req->r_snapc->snaps[i]);
	} else {
		ceph_encode_64(&p, 0); /* snap_seq */
		ceph_encode_32(&p, 0); /* snaps len */
	}

	ceph_encode_32(&p, req->r_attempts); /* retry_attempt */
	BUG_ON(p > end - 8); /* space for features */

	msg->hdr.version = cpu_to_le16(8); /* MOSDOp v8 */
	/* front_len is finalized in encode_request_finish() */
	msg->front.iov_len = p - msg->front.iov_base;
	msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
	msg->hdr.data_len = cpu_to_le32(data_len);
	/*
	 * The header "data_off" is a hint to the receiver allowing it
	 * to align received data into its buffers such that there's no
	 * need to re-copy it before writing it to disk (direct I/O).
	 */
	msg->hdr.data_off = cpu_to_le16(req->r_data_offset);

	dout("%s req %p msg %p oid %s oid_len %d\n", __func__, req, msg,
	     req->r_t.target_oid.name, req->r_t.target_oid.name_len);
}
static void encode_request_finish(struct ceph_msg *msg)
{
	void *p = msg->front.iov_base;
	void *const partial_end = p + msg->front.iov_len;
	void *const end = p + msg->front_alloc_len;

	if (CEPH_HAVE_FEATURE(msg->con->peer_features, RESEND_ON_SPLIT)) {
		/* luminous OSD -- encode features and be done */
		p = partial_end;
		ceph_encode_64(&p, msg->con->peer_features);
	} else {
		struct {
			char spgid[CEPH_ENCODING_START_BLK_LEN +
				   CEPH_PGID_ENCODING_LEN + 1];
			__le32 hash;
			__le32 epoch;
			__le32 flags;
			char reqid[CEPH_ENCODING_START_BLK_LEN +
				   sizeof(struct ceph_osd_reqid)];
			char trace[sizeof(struct ceph_blkin_trace_info)];
			__le32 client_inc;
			struct ceph_timespec mtime;
		} __packed head;
		struct ceph_pg pgid;
		void *oloc, *oid, *tail;
		int oloc_len, oid_len, tail_len;
		int len;

		/*
		 * Pre-luminous OSD -- reencode v8 into v4 using @head
		 * as a temporary buffer.  Encode the raw PG; the rest
		 * is just a matter of moving oloc, oid and tail blobs
		 * around.
		 */
		memcpy(&head, p, sizeof(head));
		p += sizeof(head);

		oloc = p;
		p += CEPH_ENCODING_START_BLK_LEN;
		pgid.pool = ceph_decode_64(&p);
		p += 4 + 4; /* preferred, key len */
		len = ceph_decode_32(&p);
		p += len;   /* nspace */
		oloc_len = p - oloc;

		oid = p;
		len = ceph_decode_32(&p);
		p += len;
		oid_len = p - oid;

		tail = p;
		tail_len = partial_end - p;

		p = msg->front.iov_base;
		ceph_encode_copy(&p, &head.client_inc, sizeof(head.client_inc));
		ceph_encode_copy(&p, &head.epoch, sizeof(head.epoch));
		ceph_encode_copy(&p, &head.flags, sizeof(head.flags));
		ceph_encode_copy(&p, &head.mtime, sizeof(head.mtime));

		/* reassert_version */
		memset(p, 0, sizeof(struct ceph_eversion));
		p += sizeof(struct ceph_eversion);

		BUG_ON(p >= oloc);
		memmove(p, oloc, oloc_len);
		p += oloc_len;

		pgid.seed = le32_to_cpu(head.hash);
		encode_pgid(&p, &pgid); /* raw pg */

		BUG_ON(p >= oid);
		memmove(p, oid, oid_len);
		p += oid_len;

		/* tail -- ops, snapid, snapc, retry_attempt */
		BUG_ON(p >= tail);
		memmove(p, tail, tail_len);
		p += tail_len;

		msg->hdr.version = cpu_to_le16(4); /* MOSDOp v4 */
	}

	BUG_ON(p > end);
	msg->front.iov_len = p - msg->front.iov_base;
	msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);

	dout("%s msg %p tid %llu %u+%u+%u v%d\n", __func__, msg,
	     le64_to_cpu(msg->hdr.tid), le32_to_cpu(msg->hdr.front_len),
	     le32_to_cpu(msg->hdr.middle_len), le32_to_cpu(msg->hdr.data_len),
	     le16_to_cpu(msg->hdr.version));
}
/*
 * @req has to be assigned a tid and registered.
 */
static void send_request(struct ceph_osd_request *req)
{
	struct ceph_osd *osd = req->r_osd;

	verify_osd_locked(osd);
	WARN_ON(osd->o_osd != req->r_t.osd);

	/* backoff? */
	if (should_plug_request(req))
		return;

	/*
	 * We may have a previously queued request message hanging
	 * around.  Cancel it to avoid corrupting the msgr.
	 */
	if (req->r_sent)
		ceph_msg_revoke(req->r_request);

	req->r_flags |= CEPH_OSD_FLAG_KNOWN_REDIR;
	if (req->r_attempts)
		req->r_flags |= CEPH_OSD_FLAG_RETRY;
	else
		WARN_ON(req->r_flags & CEPH_OSD_FLAG_RETRY);

	encode_request_partial(req, req->r_request);

	dout("%s req %p tid %llu to pgid %llu.%x spgid %llu.%xs%d osd%d e%u flags 0x%x attempt %d\n",
	     __func__, req, req->r_tid, req->r_t.pgid.pool, req->r_t.pgid.seed,
	     req->r_t.spgid.pgid.pool, req->r_t.spgid.pgid.seed,
	     req->r_t.spgid.shard, osd->o_osd, req->r_t.epoch, req->r_flags,
	     req->r_attempts);

	req->r_t.paused = false;
	req->r_stamp = jiffies;
	req->r_attempts++;

	req->r_sent = osd->o_incarnation;
	req->r_request->hdr.tid = cpu_to_le64(req->r_tid);
	ceph_con_send(&osd->o_con, ceph_msg_get(req->r_request));
}

static void maybe_request_map(struct ceph_osd_client *osdc)
{
	bool continuous = false;

	verify_osdc_locked(osdc);
	WARN_ON(!osdc->osdmap->epoch);

	if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
	    ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD) ||
	    ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) {
		dout("%s osdc %p continuous\n", __func__, osdc);
		continuous = true;
	} else {
		dout("%s osdc %p onetime\n", __func__, osdc);
	}

	if (ceph_monc_want_map(&osdc->client->monc, CEPH_SUB_OSDMAP,
			       osdc->osdmap->epoch + 1, continuous))
		ceph_monc_renew_subs(&osdc->client->monc);
}
2138 static void complete_request(struct ceph_osd_request *req, int err);
2139 static void send_map_check(struct ceph_osd_request *req);
2141 static void __submit_request(struct ceph_osd_request *req, bool wrlocked)
2143 struct ceph_osd_client *osdc = req->r_osdc;
2144 struct ceph_osd *osd;
2145 enum calc_target_result ct_res;
2146 bool need_send = false;
2147 bool promoted = false;
2148 bool need_abort = false;
2150 WARN_ON(req->r_tid);
2151 dout("%s req %p wrlocked %d\n", __func__, req, wrlocked);
2154 ct_res = calc_target(osdc, &req->r_t, NULL, false);
2155 if (ct_res == CALC_TARGET_POOL_DNE && !wrlocked)
2158 osd = lookup_create_osd(osdc, req->r_t.osd, wrlocked);
2160 WARN_ON(PTR_ERR(osd) != -EAGAIN || wrlocked);
2164 if (osdc->osdmap->epoch < osdc->epoch_barrier) {
2165 dout("req %p epoch %u barrier %u\n", req, osdc->osdmap->epoch,
2166 osdc->epoch_barrier);
2167 req->r_t.paused = true;
2168 maybe_request_map(osdc);
2169 } else if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
2170 ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) {
2171 dout("req %p pausewr\n", req);
2172 req->r_t.paused = true;
2173 maybe_request_map(osdc);
2174 } else if ((req->r_flags & CEPH_OSD_FLAG_READ) &&
2175 ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) {
2176 dout("req %p pauserd\n", req);
2177 req->r_t.paused = true;
2178 maybe_request_map(osdc);
2179 } else if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
2180 !(req->r_flags & (CEPH_OSD_FLAG_FULL_TRY |
2181 CEPH_OSD_FLAG_FULL_FORCE)) &&
2182 (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
2183 pool_full(osdc, req->r_t.base_oloc.pool))) {
2184 dout("req %p full/pool_full\n", req);
2185 pr_warn_ratelimited("FULL or reached pool quota\n");
2186 req->r_t.paused = true;
2187 maybe_request_map(osdc);
2188 if (req->r_abort_on_full)
2190 } else if (!osd_homeless(osd)) {
2193 maybe_request_map(osdc);
2196 mutex_lock(&osd->lock);
2198 * Assign the tid atomically with send_request() to protect
2199 * multiple writes to the same object from racing with each
2200 * other, resulting in out of order ops on the OSDs.
2202 req->r_tid = atomic64_inc_return(&osdc->last_tid);
2203 link_request(osd, req);
if (need_send)
	send_request(req);
2206 else if (need_abort)
2207 complete_request(req, -ENOSPC);
2208 mutex_unlock(&osd->lock);
2210 if (ct_res == CALC_TARGET_POOL_DNE)
2211 send_map_check(req);
2214 downgrade_write(&osdc->lock);
2218 up_read(&osdc->lock);
2219 down_write(&osdc->lock);
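/*
 * Lock promotion: an rw_semaphore cannot be upgraded in place, so the
 * read lock is dropped and the write lock taken before retrying the
 * target calculation.
 */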
2225 static void account_request(struct ceph_osd_request *req)
2227 WARN_ON(req->r_flags & (CEPH_OSD_FLAG_ACK | CEPH_OSD_FLAG_ONDISK));
2228 WARN_ON(!(req->r_flags & (CEPH_OSD_FLAG_READ | CEPH_OSD_FLAG_WRITE)));
2230 req->r_flags |= CEPH_OSD_FLAG_ONDISK;
2231 atomic_inc(&req->r_osdc->num_requests);
2233 req->r_start_stamp = jiffies;
2236 static void submit_request(struct ceph_osd_request *req, bool wrlocked)
2238 ceph_osdc_get_request(req);
2239 account_request(req);
2240 __submit_request(req, wrlocked);
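/*
 * The reference taken here on behalf of the request tree is dropped
 * by complete_request(), cancel_request() or the reply path.
 */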
2243 static void finish_request(struct ceph_osd_request *req)
2245 struct ceph_osd_client *osdc = req->r_osdc;
2247 WARN_ON(lookup_request_mc(&osdc->map_checks, req->r_tid));
2248 dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
2251 unlink_request(req->r_osd, req);
2252 atomic_dec(&osdc->num_requests);
2255 * If an OSD has failed or returned and a request has been sent
2256 * twice, it's possible to get a reply and end up here while the
2257 * request message is queued for delivery. We will ignore the
2258 * reply, so not a big deal, but better to try and catch it.
2260 ceph_msg_revoke(req->r_request);
2261 ceph_msg_revoke_incoming(req->r_reply);
2264 static void __complete_request(struct ceph_osd_request *req)
2266 if (req->r_callback) {
2267 dout("%s req %p tid %llu cb %pf result %d\n", __func__, req,
2268 req->r_tid, req->r_callback, req->r_result);
2269 req->r_callback(req);
2274 * This is open-coded in handle_reply().
2276 static void complete_request(struct ceph_osd_request *req, int err)
2278 dout("%s req %p tid %llu err %d\n", __func__, req, req->r_tid, err);
2280 req->r_result = err;
2281 finish_request(req);
2282 __complete_request(req);
2283 complete_all(&req->r_completion);
2284 ceph_osdc_put_request(req);
2287 static void cancel_map_check(struct ceph_osd_request *req)
2289 struct ceph_osd_client *osdc = req->r_osdc;
2290 struct ceph_osd_request *lookup_req;
2292 verify_osdc_wrlocked(osdc);
2294 lookup_req = lookup_request_mc(&osdc->map_checks, req->r_tid);
2298 WARN_ON(lookup_req != req);
2299 erase_request_mc(&osdc->map_checks, req);
2300 ceph_osdc_put_request(req);
2303 static void cancel_request(struct ceph_osd_request *req)
2305 dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
2307 cancel_map_check(req);
2308 finish_request(req);
2309 complete_all(&req->r_completion);
2310 ceph_osdc_put_request(req);
2313 static void abort_request(struct ceph_osd_request *req, int err)
2315 dout("%s req %p tid %llu err %d\n", __func__, req, req->r_tid, err);
2317 cancel_map_check(req);
2318 complete_request(req, err);
2321 static void update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb)
2323 if (likely(eb > osdc->epoch_barrier)) {
2324 dout("updating epoch_barrier from %u to %u\n",
2325 osdc->epoch_barrier, eb);
2326 osdc->epoch_barrier = eb;
2327 /* Request map if we're not to the barrier yet */
2328 if (eb > osdc->osdmap->epoch)
2329 maybe_request_map(osdc);
2333 void ceph_osdc_update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb)
2335 down_read(&osdc->lock);
2336 if (unlikely(eb > osdc->epoch_barrier)) {
2337 up_read(&osdc->lock);
2338 down_write(&osdc->lock);
2339 update_epoch_barrier(osdc, eb);
2340 up_write(&osdc->lock);
2342 up_read(&osdc->lock);
2345 EXPORT_SYMBOL(ceph_osdc_update_epoch_barrier);
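/*
 * The exported wrapper is a double-checked update: peek at
 * epoch_barrier under the read lock and only take the write lock when
 * a raise looks necessary.  update_epoch_barrier() re-checks under
 * the write lock, so racing callers are harmless.
 */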
2348 * Drop all pending requests that are stalled waiting on a full condition to
2349 * clear, and complete them with ENOSPC as the return code. Set the
2350 osdc->epoch_barrier to the latest map epoch that we've seen if any were cancelled.
2353 static void ceph_osdc_abort_on_full(struct ceph_osd_client *osdc)
2356 bool victims = false;
2358 dout("enter abort_on_full\n");
2360 if (!ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) && !have_pool_full(osdc))
2363 /* Scan list and see if there is anything to abort */
2364 for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
2365 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
2368 m = rb_first(&osd->o_requests);
2370 struct ceph_osd_request *req = rb_entry(m,
2371 struct ceph_osd_request, r_node);
2374 if (req->r_abort_on_full) {
2387 * Update the barrier to current epoch if it's behind that point,
2388 * since we know we have some calls to be aborted in the tree.
2390 update_epoch_barrier(osdc, osdc->osdmap->epoch);
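/*
 * Second pass: with the barrier recorded, abort every stalled request
 * that asked for it, completing each with -ENOSPC.
 */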
2392 for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
2393 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
2396 m = rb_first(&osd->o_requests);
2398 struct ceph_osd_request *req = rb_entry(m,
2399 struct ceph_osd_request, r_node);
2402 if (req->r_abort_on_full &&
2403 (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
2404 pool_full(osdc, req->r_t.target_oloc.pool)))
2405 abort_request(req, -ENOSPC);
2409 dout("return abort_on_full barrier=%u\n", osdc->epoch_barrier);
2412 static void check_pool_dne(struct ceph_osd_request *req)
2414 struct ceph_osd_client *osdc = req->r_osdc;
2415 struct ceph_osdmap *map = osdc->osdmap;
2417 verify_osdc_wrlocked(osdc);
2418 WARN_ON(!map->epoch);
2420 if (req->r_attempts) {
2422 * We sent a request earlier, which means that
2423 * previously the pool existed, and now it does not
2424 * (i.e., it was deleted).
2426 req->r_map_dne_bound = map->epoch;
2427 dout("%s req %p tid %llu pool disappeared\n", __func__, req,
2430 dout("%s req %p tid %llu map_dne_bound %u have %u\n", __func__,
2431 req, req->r_tid, req->r_map_dne_bound, map->epoch);
2434 if (req->r_map_dne_bound) {
2435 if (map->epoch >= req->r_map_dne_bound) {
2436 /* we had a new enough map */
2437 pr_info_ratelimited("tid %llu pool does not exist\n",
2439 complete_request(req, -ENOENT);
2442 send_map_check(req);
2446 static void map_check_cb(struct ceph_mon_generic_request *greq)
2448 struct ceph_osd_client *osdc = &greq->monc->client->osdc;
2449 struct ceph_osd_request *req;
2450 u64 tid = greq->private_data;
2452 WARN_ON(greq->result || !greq->u.newest);
2454 down_write(&osdc->lock);
2455 req = lookup_request_mc(&osdc->map_checks, tid);
2457 dout("%s tid %llu dne\n", __func__, tid);
2461 dout("%s req %p tid %llu map_dne_bound %u newest %llu\n", __func__,
2462 req, req->r_tid, req->r_map_dne_bound, greq->u.newest);
2463 if (!req->r_map_dne_bound)
2464 req->r_map_dne_bound = greq->u.newest;
2465 erase_request_mc(&osdc->map_checks, req);
2466 check_pool_dne(req);
2468 ceph_osdc_put_request(req);
2470 up_write(&osdc->lock);
2473 static void send_map_check(struct ceph_osd_request *req)
2475 struct ceph_osd_client *osdc = req->r_osdc;
2476 struct ceph_osd_request *lookup_req;
2479 verify_osdc_wrlocked(osdc);
2481 lookup_req = lookup_request_mc(&osdc->map_checks, req->r_tid);
2483 WARN_ON(lookup_req != req);
2487 ceph_osdc_get_request(req);
2488 insert_request_mc(&osdc->map_checks, req);
2489 ret = ceph_monc_get_version_async(&osdc->client->monc, "osdmap",
2490 map_check_cb, req->r_tid);
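/*
 * Map check protocol: on CALC_TARGET_POOL_DNE we ask the monitors for
 * the newest osdmap version.  map_check_cb() records it as
 * r_map_dne_bound and check_pool_dne() completes the request with
 * -ENOENT once our map has caught up to that bound.
 */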
2495 * lingering requests, watch/notify v2 infrastructure
2497 static void linger_release(struct kref *kref)
2499 struct ceph_osd_linger_request *lreq =
2500 container_of(kref, struct ceph_osd_linger_request, kref);
2502 dout("%s lreq %p reg_req %p ping_req %p\n", __func__, lreq,
2503 lreq->reg_req, lreq->ping_req);
2504 WARN_ON(!RB_EMPTY_NODE(&lreq->node));
2505 WARN_ON(!RB_EMPTY_NODE(&lreq->osdc_node));
2506 WARN_ON(!RB_EMPTY_NODE(&lreq->mc_node));
2507 WARN_ON(!list_empty(&lreq->scan_item));
2508 WARN_ON(!list_empty(&lreq->pending_lworks));
2512 ceph_osdc_put_request(lreq->reg_req);
2514 ceph_osdc_put_request(lreq->ping_req);
2515 target_destroy(&lreq->t);
2519 static void linger_put(struct ceph_osd_linger_request *lreq)
2522 kref_put(&lreq->kref, linger_release);
2525 static struct ceph_osd_linger_request *
2526 linger_get(struct ceph_osd_linger_request *lreq)
2528 kref_get(&lreq->kref);
2532 static struct ceph_osd_linger_request *
2533 linger_alloc(struct ceph_osd_client *osdc)
2535 struct ceph_osd_linger_request *lreq;
2537 lreq = kzalloc(sizeof(*lreq), GFP_NOIO);
2541 kref_init(&lreq->kref);
2542 mutex_init(&lreq->lock);
2543 RB_CLEAR_NODE(&lreq->node);
2544 RB_CLEAR_NODE(&lreq->osdc_node);
2545 RB_CLEAR_NODE(&lreq->mc_node);
2546 INIT_LIST_HEAD(&lreq->scan_item);
2547 INIT_LIST_HEAD(&lreq->pending_lworks);
2548 init_completion(&lreq->reg_commit_wait);
2549 init_completion(&lreq->notify_finish_wait);
2552 target_init(&lreq->t);
2554 dout("%s lreq %p\n", __func__, lreq);
2558 DEFINE_RB_INSDEL_FUNCS(linger, struct ceph_osd_linger_request, linger_id, node)
2559 DEFINE_RB_FUNCS(linger_osdc, struct ceph_osd_linger_request, linger_id, osdc_node)
2560 DEFINE_RB_FUNCS(linger_mc, struct ceph_osd_linger_request, linger_id, mc_node)
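/*
 * A linger request can sit in up to three rbtrees at once: its OSD
 * session (node), the global table keyed by linger_id (osdc_node) and
 * the map-check table (mc_node).
 */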
2563 * Create linger request <-> OSD session relation.
2565 * @lreq has to be registered, @osd may be homeless.
2567 static void link_linger(struct ceph_osd *osd,
2568 struct ceph_osd_linger_request *lreq)
2570 verify_osd_locked(osd);
2571 WARN_ON(!lreq->linger_id || lreq->osd);
2572 dout("%s osd %p osd%d lreq %p linger_id %llu\n", __func__, osd,
2573 osd->o_osd, lreq, lreq->linger_id);
2575 if (!osd_homeless(osd))
2576 __remove_osd_from_lru(osd);
2578 atomic_inc(&osd->o_osdc->num_homeless);
2581 insert_linger(&osd->o_linger_requests, lreq);
2585 static void unlink_linger(struct ceph_osd *osd,
2586 struct ceph_osd_linger_request *lreq)
2588 verify_osd_locked(osd);
2589 WARN_ON(lreq->osd != osd);
2590 dout("%s osd %p osd%d lreq %p linger_id %llu\n", __func__, osd,
2591 osd->o_osd, lreq, lreq->linger_id);
2594 erase_linger(&osd->o_linger_requests, lreq);
2597 if (!osd_homeless(osd))
2598 maybe_move_osd_to_lru(osd);
2600 atomic_dec(&osd->o_osdc->num_homeless);
2603 static bool __linger_registered(struct ceph_osd_linger_request *lreq)
2605 verify_osdc_locked(lreq->osdc);
2607 return !RB_EMPTY_NODE(&lreq->osdc_node);
2610 static bool linger_registered(struct ceph_osd_linger_request *lreq)
2612 struct ceph_osd_client *osdc = lreq->osdc;
2615 down_read(&osdc->lock);
2616 registered = __linger_registered(lreq);
2617 up_read(&osdc->lock);
2622 static void linger_register(struct ceph_osd_linger_request *lreq)
2624 struct ceph_osd_client *osdc = lreq->osdc;
2626 verify_osdc_wrlocked(osdc);
2627 WARN_ON(lreq->linger_id);
2630 lreq->linger_id = ++osdc->last_linger_id;
2631 insert_linger_osdc(&osdc->linger_requests, lreq);
2634 static void linger_unregister(struct ceph_osd_linger_request *lreq)
2636 struct ceph_osd_client *osdc = lreq->osdc;
2638 verify_osdc_wrlocked(osdc);
2640 erase_linger_osdc(&osdc->linger_requests, lreq);
2644 static void cancel_linger_request(struct ceph_osd_request *req)
2646 struct ceph_osd_linger_request *lreq = req->r_priv;
2648 WARN_ON(!req->r_linger);
2649 cancel_request(req);
2653 struct linger_work {
2654 struct work_struct work;
2655 struct ceph_osd_linger_request *lreq;
2656 struct list_head pending_item;
2657 unsigned long queued_stamp;
2663 void *payload; /* points into @msg front */
2666 struct ceph_msg *msg; /* for ceph_msg_put() */
2674 static struct linger_work *lwork_alloc(struct ceph_osd_linger_request *lreq,
2677 struct linger_work *lwork;
2679 lwork = kzalloc(sizeof(*lwork), GFP_NOIO);
2683 INIT_WORK(&lwork->work, workfn);
2684 INIT_LIST_HEAD(&lwork->pending_item);
2685 lwork->lreq = linger_get(lreq);
2690 static void lwork_free(struct linger_work *lwork)
2692 struct ceph_osd_linger_request *lreq = lwork->lreq;
2694 mutex_lock(&lreq->lock);
2695 list_del(&lwork->pending_item);
2696 mutex_unlock(&lreq->lock);
2702 static void lwork_queue(struct linger_work *lwork)
2704 struct ceph_osd_linger_request *lreq = lwork->lreq;
2705 struct ceph_osd_client *osdc = lreq->osdc;
2707 verify_lreq_locked(lreq);
2708 WARN_ON(!list_empty(&lwork->pending_item));
2710 lwork->queued_stamp = jiffies;
2711 list_add_tail(&lwork->pending_item, &lreq->pending_lworks);
2712 queue_work(osdc->notify_wq, &lwork->work);
2715 static void do_watch_notify(struct work_struct *w)
2717 struct linger_work *lwork = container_of(w, struct linger_work, work);
2718 struct ceph_osd_linger_request *lreq = lwork->lreq;
2720 if (!linger_registered(lreq)) {
2721 dout("%s lreq %p not registered\n", __func__, lreq);
2725 WARN_ON(!lreq->is_watch);
2726 dout("%s lreq %p notify_id %llu notifier_id %llu payload_len %zu\n",
2727 __func__, lreq, lwork->notify.notify_id, lwork->notify.notifier_id,
2728 lwork->notify.payload_len);
2729 lreq->wcb(lreq->data, lwork->notify.notify_id, lreq->linger_id,
2730 lwork->notify.notifier_id, lwork->notify.payload,
2731 lwork->notify.payload_len);
2734 ceph_msg_put(lwork->notify.msg);
2738 static void do_watch_error(struct work_struct *w)
2740 struct linger_work *lwork = container_of(w, struct linger_work, work);
2741 struct ceph_osd_linger_request *lreq = lwork->lreq;
2743 if (!linger_registered(lreq)) {
2744 dout("%s lreq %p not registered\n", __func__, lreq);
2748 dout("%s lreq %p err %d\n", __func__, lreq, lwork->error.err);
2749 lreq->errcb(lreq->data, lreq->linger_id, lwork->error.err);
2755 static void queue_watch_error(struct ceph_osd_linger_request *lreq)
2757 struct linger_work *lwork;
2759 lwork = lwork_alloc(lreq, do_watch_error);
2761 pr_err("failed to allocate error-lwork\n");
2765 lwork->error.err = lreq->last_error;
2769 static void linger_reg_commit_complete(struct ceph_osd_linger_request *lreq,
2772 if (!completion_done(&lreq->reg_commit_wait)) {
2773 lreq->reg_commit_error = (result <= 0 ? result : 0);
2774 complete_all(&lreq->reg_commit_wait);
2778 static void linger_commit_cb(struct ceph_osd_request *req)
2780 struct ceph_osd_linger_request *lreq = req->r_priv;
2782 mutex_lock(&lreq->lock);
2783 dout("%s lreq %p linger_id %llu result %d\n", __func__, lreq,
2784 lreq->linger_id, req->r_result);
2785 linger_reg_commit_complete(lreq, req->r_result);
2786 lreq->committed = true;
2788 if (!lreq->is_watch) {
2789 struct ceph_osd_data *osd_data =
2790 osd_req_op_data(req, 0, notify, response_data);
2791 void *p = page_address(osd_data->pages[0]);
2793 WARN_ON(req->r_ops[0].op != CEPH_OSD_OP_NOTIFY ||
2794 osd_data->type != CEPH_OSD_DATA_TYPE_PAGES);
2796 /* make note of the notify_id */
2797 if (req->r_ops[0].outdata_len >= sizeof(u64)) {
2798 lreq->notify_id = ceph_decode_64(&p);
2799 dout("lreq %p notify_id %llu\n", lreq,
2802 dout("lreq %p no notify_id\n", lreq);
2806 mutex_unlock(&lreq->lock);
2810 static int normalize_watch_error(int err)
2813 * Translate ENOENT -> ENOTCONN so that a delete->disconnection
2814 * notification and a failure to reconnect because we raced with
2815 * the delete appear the same to the user.
if (err == -ENOENT)
	err = -ENOTCONN;

return err;
2823 static void linger_reconnect_cb(struct ceph_osd_request *req)
2825 struct ceph_osd_linger_request *lreq = req->r_priv;
2827 mutex_lock(&lreq->lock);
2828 dout("%s lreq %p linger_id %llu result %d last_error %d\n", __func__,
2829 lreq, lreq->linger_id, req->r_result, lreq->last_error);
2830 if (req->r_result < 0) {
2831 if (!lreq->last_error) {
2832 lreq->last_error = normalize_watch_error(req->r_result);
2833 queue_watch_error(lreq);
2837 mutex_unlock(&lreq->lock);
2841 static void send_linger(struct ceph_osd_linger_request *lreq)
2843 struct ceph_osd_request *req = lreq->reg_req;
2844 struct ceph_osd_req_op *op = &req->r_ops[0];
2846 verify_osdc_wrlocked(req->r_osdc);
2847 dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
2850 cancel_linger_request(req);
2852 request_reinit(req);
2853 ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
2854 ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
2855 req->r_flags = lreq->t.flags;
2856 req->r_mtime = lreq->mtime;
2858 mutex_lock(&lreq->lock);
2859 if (lreq->is_watch && lreq->committed) {
2860 WARN_ON(op->op != CEPH_OSD_OP_WATCH ||
2861 op->watch.cookie != lreq->linger_id);
2862 op->watch.op = CEPH_OSD_WATCH_OP_RECONNECT;
2863 op->watch.gen = ++lreq->register_gen;
2864 dout("lreq %p reconnect register_gen %u\n", lreq,
2866 req->r_callback = linger_reconnect_cb;
2868 if (!lreq->is_watch)
2869 lreq->notify_id = 0;
2871 WARN_ON(op->watch.op != CEPH_OSD_WATCH_OP_WATCH);
2872 dout("lreq %p register\n", lreq);
2873 req->r_callback = linger_commit_cb;
2875 mutex_unlock(&lreq->lock);
2877 req->r_priv = linger_get(lreq);
2878 req->r_linger = true;
2880 submit_request(req, true);
2883 static void linger_ping_cb(struct ceph_osd_request *req)
2885 struct ceph_osd_linger_request *lreq = req->r_priv;
2887 mutex_lock(&lreq->lock);
2888 dout("%s lreq %p linger_id %llu result %d ping_sent %lu last_error %d\n",
2889 __func__, lreq, lreq->linger_id, req->r_result, lreq->ping_sent,
2891 if (lreq->register_gen == req->r_ops[0].watch.gen) {
2892 if (!req->r_result) {
2893 lreq->watch_valid_thru = lreq->ping_sent;
2894 } else if (!lreq->last_error) {
2895 lreq->last_error = normalize_watch_error(req->r_result);
2896 queue_watch_error(lreq);
2899 dout("lreq %p register_gen %u ignoring old pong %u\n", lreq,
2900 lreq->register_gen, req->r_ops[0].watch.gen);
2903 mutex_unlock(&lreq->lock);
2907 static void send_linger_ping(struct ceph_osd_linger_request *lreq)
2909 struct ceph_osd_client *osdc = lreq->osdc;
2910 struct ceph_osd_request *req = lreq->ping_req;
2911 struct ceph_osd_req_op *op = &req->r_ops[0];
2913 if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) {
2914 dout("%s PAUSERD\n", __func__);
2918 lreq->ping_sent = jiffies;
2919 dout("%s lreq %p linger_id %llu ping_sent %lu register_gen %u\n",
2920 __func__, lreq, lreq->linger_id, lreq->ping_sent,
2921 lreq->register_gen);
2924 cancel_linger_request(req);
2926 request_reinit(req);
2927 target_copy(&req->r_t, &lreq->t);
2929 WARN_ON(op->op != CEPH_OSD_OP_WATCH ||
2930 op->watch.cookie != lreq->linger_id ||
2931 op->watch.op != CEPH_OSD_WATCH_OP_PING);
2932 op->watch.gen = lreq->register_gen;
2933 req->r_callback = linger_ping_cb;
2934 req->r_priv = linger_get(lreq);
2935 req->r_linger = true;
2937 ceph_osdc_get_request(req);
2938 account_request(req);
2939 req->r_tid = atomic64_inc_return(&osdc->last_tid);
2940 link_request(lreq->osd, req);
send_request(req);
2944 static void linger_submit(struct ceph_osd_linger_request *lreq)
2946 struct ceph_osd_client *osdc = lreq->osdc;
2947 struct ceph_osd *osd;
2949 calc_target(osdc, &lreq->t, NULL, false);
2950 osd = lookup_create_osd(osdc, lreq->t.osd, true);
2951 link_linger(osd, lreq);
2956 static void cancel_linger_map_check(struct ceph_osd_linger_request *lreq)
2958 struct ceph_osd_client *osdc = lreq->osdc;
2959 struct ceph_osd_linger_request *lookup_lreq;
2961 verify_osdc_wrlocked(osdc);
2963 lookup_lreq = lookup_linger_mc(&osdc->linger_map_checks,
2968 WARN_ON(lookup_lreq != lreq);
2969 erase_linger_mc(&osdc->linger_map_checks, lreq);
2974 * @lreq has to be both registered and linked.
2976 static void __linger_cancel(struct ceph_osd_linger_request *lreq)
2978 if (lreq->is_watch && lreq->ping_req->r_osd)
2979 cancel_linger_request(lreq->ping_req);
2980 if (lreq->reg_req->r_osd)
2981 cancel_linger_request(lreq->reg_req);
2982 cancel_linger_map_check(lreq);
2983 unlink_linger(lreq->osd, lreq);
2984 linger_unregister(lreq);
2987 static void linger_cancel(struct ceph_osd_linger_request *lreq)
2989 struct ceph_osd_client *osdc = lreq->osdc;
2991 down_write(&osdc->lock);
2992 if (__linger_registered(lreq))
2993 __linger_cancel(lreq);
2994 up_write(&osdc->lock);
2997 static void send_linger_map_check(struct ceph_osd_linger_request *lreq);
2999 static void check_linger_pool_dne(struct ceph_osd_linger_request *lreq)
3001 struct ceph_osd_client *osdc = lreq->osdc;
3002 struct ceph_osdmap *map = osdc->osdmap;
3004 verify_osdc_wrlocked(osdc);
3005 WARN_ON(!map->epoch);
3007 if (lreq->register_gen) {
3008 lreq->map_dne_bound = map->epoch;
3009 dout("%s lreq %p linger_id %llu pool disappeared\n", __func__,
3010 lreq, lreq->linger_id);
3012 dout("%s lreq %p linger_id %llu map_dne_bound %u have %u\n",
3013 __func__, lreq, lreq->linger_id, lreq->map_dne_bound,
3017 if (lreq->map_dne_bound) {
3018 if (map->epoch >= lreq->map_dne_bound) {
3019 /* we had a new enough map */
3020 pr_info("linger_id %llu pool does not exist\n",
3022 linger_reg_commit_complete(lreq, -ENOENT);
3023 __linger_cancel(lreq);
3026 send_linger_map_check(lreq);
3030 static void linger_map_check_cb(struct ceph_mon_generic_request *greq)
3032 struct ceph_osd_client *osdc = &greq->monc->client->osdc;
3033 struct ceph_osd_linger_request *lreq;
3034 u64 linger_id = greq->private_data;
3036 WARN_ON(greq->result || !greq->u.newest);
3038 down_write(&osdc->lock);
3039 lreq = lookup_linger_mc(&osdc->linger_map_checks, linger_id);
3041 dout("%s linger_id %llu dne\n", __func__, linger_id);
3045 dout("%s lreq %p linger_id %llu map_dne_bound %u newest %llu\n",
3046 __func__, lreq, lreq->linger_id, lreq->map_dne_bound,
3048 if (!lreq->map_dne_bound)
3049 lreq->map_dne_bound = greq->u.newest;
3050 erase_linger_mc(&osdc->linger_map_checks, lreq);
3051 check_linger_pool_dne(lreq);
3055 up_write(&osdc->lock);
3058 static void send_linger_map_check(struct ceph_osd_linger_request *lreq)
3060 struct ceph_osd_client *osdc = lreq->osdc;
3061 struct ceph_osd_linger_request *lookup_lreq;
3064 verify_osdc_wrlocked(osdc);
3066 lookup_lreq = lookup_linger_mc(&osdc->linger_map_checks,
3069 WARN_ON(lookup_lreq != lreq);
3074 insert_linger_mc(&osdc->linger_map_checks, lreq);
3075 ret = ceph_monc_get_version_async(&osdc->client->monc, "osdmap",
3076 linger_map_check_cb, lreq->linger_id);
3080 static int linger_reg_commit_wait(struct ceph_osd_linger_request *lreq)
3084 dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
3085 ret = wait_for_completion_interruptible(&lreq->reg_commit_wait);
3086 return ret ?: lreq->reg_commit_error;
3089 static int linger_notify_finish_wait(struct ceph_osd_linger_request *lreq)
3093 dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
3094 ret = wait_for_completion_interruptible(&lreq->notify_finish_wait);
3095 return ret ?: lreq->notify_finish_error;
3099 * Timeout callback, called every N seconds.  When one or more OSD
3100 * requests have been active for more than N seconds, we send a keepalive
3101 * (tag + timestamp) to their OSDs to ensure any communications channel
3102 * reset is detected.
3104 static void handle_timeout(struct work_struct *work)
3106 struct ceph_osd_client *osdc =
3107 container_of(work, struct ceph_osd_client, timeout_work.work);
3108 struct ceph_options *opts = osdc->client->options;
3109 unsigned long cutoff = jiffies - opts->osd_keepalive_timeout;
3110 unsigned long expiry_cutoff = jiffies - opts->osd_request_timeout;
3111 LIST_HEAD(slow_osds);
3112 struct rb_node *n, *p;
3114 dout("%s osdc %p\n", __func__, osdc);
3115 down_write(&osdc->lock);
3118 * ping osds that are a bit slow. this ensures that if there
3119 * is a break in the TCP connection we will notice, and reopen
3120 * a connection with that osd (from the fault callback).
3122 for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
3123 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
3126 for (p = rb_first(&osd->o_requests); p; ) {
3127 struct ceph_osd_request *req =
3128 rb_entry(p, struct ceph_osd_request, r_node);
3130 p = rb_next(p); /* abort_request() */
3132 if (time_before(req->r_stamp, cutoff)) {
3133 dout(" req %p tid %llu on osd%d is laggy\n",
3134 req, req->r_tid, osd->o_osd);
3137 if (opts->osd_request_timeout &&
3138 time_before(req->r_start_stamp, expiry_cutoff)) {
3139 pr_err_ratelimited("tid %llu on osd%d timeout\n",
3140 req->r_tid, osd->o_osd);
3141 abort_request(req, -ETIMEDOUT);
3144 for (p = rb_first(&osd->o_linger_requests); p; p = rb_next(p)) {
3145 struct ceph_osd_linger_request *lreq =
3146 rb_entry(p, struct ceph_osd_linger_request, node);
3148 dout(" lreq %p linger_id %llu is served by osd%d\n",
3149 lreq, lreq->linger_id, osd->o_osd);
3152 mutex_lock(&lreq->lock);
3153 if (lreq->is_watch && lreq->committed && !lreq->last_error)
3154 send_linger_ping(lreq);
3155 mutex_unlock(&lreq->lock);
3159 list_move_tail(&osd->o_keepalive_item, &slow_osds);
3162 if (opts->osd_request_timeout) {
3163 for (p = rb_first(&osdc->homeless_osd.o_requests); p; ) {
3164 struct ceph_osd_request *req =
3165 rb_entry(p, struct ceph_osd_request, r_node);
3167 p = rb_next(p); /* abort_request() */
3169 if (time_before(req->r_start_stamp, expiry_cutoff)) {
3170 pr_err_ratelimited("tid %llu on osd%d timeout\n",
3171 req->r_tid, osdc->homeless_osd.o_osd);
3172 abort_request(req, -ETIMEDOUT);
3177 if (atomic_read(&osdc->num_homeless) || !list_empty(&slow_osds))
3178 maybe_request_map(osdc);
3180 while (!list_empty(&slow_osds)) {
3181 struct ceph_osd *osd = list_first_entry(&slow_osds,
3184 list_del_init(&osd->o_keepalive_item);
3185 ceph_con_keepalive(&osd->o_con);
3188 up_write(&osdc->lock);
3189 schedule_delayed_work(&osdc->timeout_work,
3190 osdc->client->options->osd_keepalive_timeout);
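/*
 * The delayed work re-arms itself above, so this scan keeps running
 * every osd_keepalive_timeout for the life of the client.
 */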
3193 static void handle_osds_timeout(struct work_struct *work)
3195 struct ceph_osd_client *osdc =
3196 container_of(work, struct ceph_osd_client,
3197 osds_timeout_work.work);
3198 unsigned long delay = osdc->client->options->osd_idle_ttl / 4;
3199 struct ceph_osd *osd, *nosd;
3201 dout("%s osdc %p\n", __func__, osdc);
3202 down_write(&osdc->lock);
3203 list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) {
3204 if (time_before(jiffies, osd->lru_ttl))
3207 WARN_ON(!RB_EMPTY_ROOT(&osd->o_requests));
3208 WARN_ON(!RB_EMPTY_ROOT(&osd->o_linger_requests));
3212 up_write(&osdc->lock);
3213 schedule_delayed_work(&osdc->osds_timeout_work,
3214 round_jiffies_relative(delay));
3217 static int ceph_oloc_decode(void **p, void *end,
3218 struct ceph_object_locator *oloc)
3220 u8 struct_v, struct_cv;
3225 ceph_decode_need(p, end, 1 + 1 + 4, e_inval);
3226 struct_v = ceph_decode_8(p);
3227 struct_cv = ceph_decode_8(p);
3229 pr_warn("got v %d < 3 cv %d of ceph_object_locator\n",
3230 struct_v, struct_cv);
3233 if (struct_cv > 6) {
3234 pr_warn("got v %d cv %d > 6 of ceph_object_locator\n",
3235 struct_v, struct_cv);
3238 len = ceph_decode_32(p);
3239 ceph_decode_need(p, end, len, e_inval);
3240 struct_end = *p + len;
3242 oloc->pool = ceph_decode_64(p);
3243 *p += 4; /* skip preferred */
3245 len = ceph_decode_32(p);
3247 pr_warn("ceph_object_locator::key is set\n");
3251 if (struct_v >= 5) {
3252 bool changed = false;
3254 len = ceph_decode_32(p);
3256 ceph_decode_need(p, end, len, e_inval);
3257 if (!oloc->pool_ns ||
3258 ceph_compare_string(oloc->pool_ns, *p, len))
3266 /* redirect changes namespace */
3267 pr_warn("ceph_object_locator::nspace is changed\n");
3272 if (struct_v >= 6) {
3273 s64 hash = ceph_decode_64(p);
3275 pr_warn("ceph_object_locator::hash is set\n");
3290 static int ceph_redirect_decode(void **p, void *end,
3291 struct ceph_request_redirect *redir)
3293 u8 struct_v, struct_cv;
3298 ceph_decode_need(p, end, 1 + 1 + 4, e_inval);
3299 struct_v = ceph_decode_8(p);
3300 struct_cv = ceph_decode_8(p);
3301 if (struct_cv > 1) {
3302 pr_warn("got v %d cv %d > 1 of ceph_request_redirect\n",
3303 struct_v, struct_cv);
3306 len = ceph_decode_32(p);
3307 ceph_decode_need(p, end, len, e_inval);
3308 struct_end = *p + len;
3310 ret = ceph_oloc_decode(p, end, &redir->oloc);
3314 len = ceph_decode_32(p);
3316 pr_warn("ceph_request_redirect::object_name is set\n");
3320 len = ceph_decode_32(p);
3321 *p += len; /* skip osd_instructions */
3333 struct MOSDOpReply {
3334 struct ceph_pg pgid;
3339 u32 outdata_len[CEPH_OSD_MAX_OPS];
3340 s32 rval[CEPH_OSD_MAX_OPS];
3342 struct ceph_eversion replay_version;
3344 struct ceph_request_redirect redirect;
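/*
 * decode_MOSDOpReply() below fills this in front-to-back from the
 * message payload: pgid, flags, result, (bad_)replay_version, epoch,
 * per-op lengths and return values, then user_version and an optional
 * redirect on newer encodings.
 */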
3347 static int decode_MOSDOpReply(const struct ceph_msg *msg, struct MOSDOpReply *m)
3349 void *p = msg->front.iov_base;
3350 void *const end = p + msg->front.iov_len;
3351 u16 version = le16_to_cpu(msg->hdr.version);
3352 struct ceph_eversion bad_replay_version;
3358 ceph_decode_32_safe(&p, end, len, e_inval);
3359 ceph_decode_need(&p, end, len, e_inval);
3360 p += len; /* skip oid */
3362 ret = ceph_decode_pgid(&p, end, &m->pgid);
3366 ceph_decode_64_safe(&p, end, m->flags, e_inval);
3367 ceph_decode_32_safe(&p, end, m->result, e_inval);
3368 ceph_decode_need(&p, end, sizeof(bad_replay_version), e_inval);
3369 memcpy(&bad_replay_version, p, sizeof(bad_replay_version));
3370 p += sizeof(bad_replay_version);
3371 ceph_decode_32_safe(&p, end, m->epoch, e_inval);
3373 ceph_decode_32_safe(&p, end, m->num_ops, e_inval);
3374 if (m->num_ops > ARRAY_SIZE(m->outdata_len))
3377 ceph_decode_need(&p, end, m->num_ops * sizeof(struct ceph_osd_op),
3379 for (i = 0; i < m->num_ops; i++) {
3380 struct ceph_osd_op *op = p;
3382 m->outdata_len[i] = le32_to_cpu(op->payload_len);
3386 ceph_decode_32_safe(&p, end, m->retry_attempt, e_inval);
3387 for (i = 0; i < m->num_ops; i++)
3388 ceph_decode_32_safe(&p, end, m->rval[i], e_inval);
3391 ceph_decode_need(&p, end, sizeof(m->replay_version), e_inval);
3392 memcpy(&m->replay_version, p, sizeof(m->replay_version));
3393 p += sizeof(m->replay_version);
3394 ceph_decode_64_safe(&p, end, m->user_version, e_inval);
3396 m->replay_version = bad_replay_version; /* struct */
3397 m->user_version = le64_to_cpu(m->replay_version.version);
3402 ceph_decode_8_safe(&p, end, decode_redir, e_inval);
3410 ret = ceph_redirect_decode(&p, end, &m->redirect);
3414 ceph_oloc_init(&m->redirect.oloc);
3424 * Handle MOSDOpReply. Set ->r_result and call the callback if it is
3427 static void handle_reply(struct ceph_osd *osd, struct ceph_msg *msg)
3429 struct ceph_osd_client *osdc = osd->o_osdc;
3430 struct ceph_osd_request *req;
3431 struct MOSDOpReply m;
3432 u64 tid = le64_to_cpu(msg->hdr.tid);
3437 dout("%s msg %p tid %llu\n", __func__, msg, tid);
3439 down_read(&osdc->lock);
3440 if (!osd_registered(osd)) {
3441 dout("%s osd%d unknown\n", __func__, osd->o_osd);
3442 goto out_unlock_osdc;
3444 WARN_ON(osd->o_osd != le64_to_cpu(msg->hdr.src.num));
3446 mutex_lock(&osd->lock);
3447 req = lookup_request(&osd->o_requests, tid);
3449 dout("%s osd%d tid %llu unknown\n", __func__, osd->o_osd, tid);
3450 goto out_unlock_session;
3453 m.redirect.oloc.pool_ns = req->r_t.target_oloc.pool_ns;
3454 ret = decode_MOSDOpReply(msg, &m);
3455 m.redirect.oloc.pool_ns = NULL;
3457 pr_err("failed to decode MOSDOpReply for tid %llu: %d\n",
3462 dout("%s req %p tid %llu flags 0x%llx pgid %llu.%x epoch %u attempt %d v %u'%llu uv %llu\n",
3463 __func__, req, req->r_tid, m.flags, m.pgid.pool, m.pgid.seed,
3464 m.epoch, m.retry_attempt, le32_to_cpu(m.replay_version.epoch),
3465 le64_to_cpu(m.replay_version.version), m.user_version);
3467 if (m.retry_attempt >= 0) {
3468 if (m.retry_attempt != req->r_attempts - 1) {
3469 dout("req %p tid %llu retry_attempt %d != %d, ignoring\n",
3470 req, req->r_tid, m.retry_attempt,
3471 req->r_attempts - 1);
3472 goto out_unlock_session;
3475 WARN_ON(1); /* MOSDOpReply v4 is assumed */
3478 if (!ceph_oloc_empty(&m.redirect.oloc)) {
3479 dout("req %p tid %llu redirect pool %lld\n", req, req->r_tid,
3480 m.redirect.oloc.pool);
3481 unlink_request(osd, req);
3482 mutex_unlock(&osd->lock);
3485 * Not ceph_oloc_copy() - changing pool_ns is not supported.
3488 req->r_t.target_oloc.pool = m.redirect.oloc.pool;
3489 req->r_flags |= CEPH_OSD_FLAG_REDIRECTED;
3491 __submit_request(req, false);
3492 goto out_unlock_osdc;
3495 if (m.num_ops != req->r_num_ops) {
3496 pr_err("num_ops %d != %d for tid %llu\n", m.num_ops,
3497 req->r_num_ops, req->r_tid);
3500 for (i = 0; i < req->r_num_ops; i++) {
3501 dout(" req %p tid %llu op %d rval %d len %u\n", req,
3502 req->r_tid, i, m.rval[i], m.outdata_len[i]);
3503 req->r_ops[i].rval = m.rval[i];
3504 req->r_ops[i].outdata_len = m.outdata_len[i];
3505 data_len += m.outdata_len[i];
3507 if (data_len != le32_to_cpu(msg->hdr.data_len)) {
3508 pr_err("sum of lens %u != %u for tid %llu\n", data_len,
3509 le32_to_cpu(msg->hdr.data_len), req->r_tid);
3512 dout("%s req %p tid %llu result %d data_len %u\n", __func__,
3513 req, req->r_tid, m.result, data_len);
3516 * Since we only ever request ONDISK, we should only ever get
3517 * one (type of) reply back.
3519 WARN_ON(!(m.flags & CEPH_OSD_FLAG_ONDISK));
3520 req->r_result = m.result ?: data_len;
3521 finish_request(req);
3522 mutex_unlock(&osd->lock);
3523 up_read(&osdc->lock);
3525 __complete_request(req);
3526 complete_all(&req->r_completion);
3527 ceph_osdc_put_request(req);
3531 complete_request(req, -EIO);
3533 mutex_unlock(&osd->lock);
3535 up_read(&osdc->lock);
3538 static void set_pool_was_full(struct ceph_osd_client *osdc)
3542 for (n = rb_first(&osdc->osdmap->pg_pools); n; n = rb_next(n)) {
3543 struct ceph_pg_pool_info *pi =
3544 rb_entry(n, struct ceph_pg_pool_info, node);
3546 pi->was_full = __pool_full(pi);
3550 static bool pool_cleared_full(struct ceph_osd_client *osdc, s64 pool_id)
3552 struct ceph_pg_pool_info *pi;
3554 pi = ceph_pg_pool_by_id(osdc->osdmap, pool_id);
3558 return pi->was_full && !__pool_full(pi);
3561 static enum calc_target_result
3562 recalc_linger_target(struct ceph_osd_linger_request *lreq)
3564 struct ceph_osd_client *osdc = lreq->osdc;
3565 enum calc_target_result ct_res;
3567 ct_res = calc_target(osdc, &lreq->t, NULL, true);
3568 if (ct_res == CALC_TARGET_NEED_RESEND) {
3569 struct ceph_osd *osd;
3571 osd = lookup_create_osd(osdc, lreq->t.osd, true);
3572 if (osd != lreq->osd) {
3573 unlink_linger(lreq->osd, lreq);
3574 link_linger(osd, lreq);
3582 * Requeue requests whose mapping to an OSD has changed.
3584 static void scan_requests(struct ceph_osd *osd,
3587 bool check_pool_cleared_full,
3588 struct rb_root *need_resend,
3589 struct list_head *need_resend_linger)
3591 struct ceph_osd_client *osdc = osd->o_osdc;
3593 bool force_resend_writes;
3595 for (n = rb_first(&osd->o_linger_requests); n; ) {
3596 struct ceph_osd_linger_request *lreq =
3597 rb_entry(n, struct ceph_osd_linger_request, node);
3598 enum calc_target_result ct_res;
3600 n = rb_next(n); /* recalc_linger_target() */
3602 dout("%s lreq %p linger_id %llu\n", __func__, lreq,
3604 ct_res = recalc_linger_target(lreq);
3606 case CALC_TARGET_NO_ACTION:
3607 force_resend_writes = cleared_full ||
3608 (check_pool_cleared_full &&
3609 pool_cleared_full(osdc, lreq->t.base_oloc.pool));
3610 if (!force_resend && !force_resend_writes)
3614 case CALC_TARGET_NEED_RESEND:
3615 cancel_linger_map_check(lreq);
3617 * scan_requests() for the previous epoch(s)
3618 * may have already added it to the list, since
3619 * it's not unlinked here.
3621 if (list_empty(&lreq->scan_item))
3622 list_add_tail(&lreq->scan_item, need_resend_linger);
3624 case CALC_TARGET_POOL_DNE:
3625 list_del_init(&lreq->scan_item);
3626 check_linger_pool_dne(lreq);
3631 for (n = rb_first(&osd->o_requests); n; ) {
3632 struct ceph_osd_request *req =
3633 rb_entry(n, struct ceph_osd_request, r_node);
3634 enum calc_target_result ct_res;
3636 n = rb_next(n); /* unlink_request(), check_pool_dne() */
3638 dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
3639 ct_res = calc_target(osdc, &req->r_t, &req->r_osd->o_con,
3642 case CALC_TARGET_NO_ACTION:
3643 force_resend_writes = cleared_full ||
3644 (check_pool_cleared_full &&
3645 pool_cleared_full(osdc, req->r_t.base_oloc.pool));
3646 if (!force_resend &&
3647 (!(req->r_flags & CEPH_OSD_FLAG_WRITE) ||
3648 !force_resend_writes))
3652 case CALC_TARGET_NEED_RESEND:
3653 cancel_map_check(req);
3654 unlink_request(osd, req);
3655 insert_request(need_resend, req);
3657 case CALC_TARGET_POOL_DNE:
3658 check_pool_dne(req);
3664 static int handle_one_map(struct ceph_osd_client *osdc,
3665 void *p, void *end, bool incremental,
3666 struct rb_root *need_resend,
3667 struct list_head *need_resend_linger)
3669 struct ceph_osdmap *newmap;
3671 bool skipped_map = false;
3674 was_full = ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL);
3675 set_pool_was_full(osdc);
3678 newmap = osdmap_apply_incremental(&p, end, osdc->osdmap);
3680 newmap = ceph_osdmap_decode(&p, end);
3682 return PTR_ERR(newmap);
3684 if (newmap != osdc->osdmap) {
3686 * Preserve ->was_full before destroying the old map.
3687 * For pools that weren't in the old map, ->was_full should be false.
3690 for (n = rb_first(&newmap->pg_pools); n; n = rb_next(n)) {
3691 struct ceph_pg_pool_info *pi =
3692 rb_entry(n, struct ceph_pg_pool_info, node);
3693 struct ceph_pg_pool_info *old_pi;
3695 old_pi = ceph_pg_pool_by_id(osdc->osdmap, pi->id);
3697 pi->was_full = old_pi->was_full;
3699 WARN_ON(pi->was_full);
3702 if (osdc->osdmap->epoch &&
3703 osdc->osdmap->epoch + 1 < newmap->epoch) {
3704 WARN_ON(incremental);
3708 ceph_osdmap_destroy(osdc->osdmap);
3709 osdc->osdmap = newmap;
3712 was_full &= !ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL);
3713 scan_requests(&osdc->homeless_osd, skipped_map, was_full, true,
3714 need_resend, need_resend_linger);
3716 for (n = rb_first(&osdc->osds); n; ) {
3717 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
3719 n = rb_next(n); /* close_osd() */
3721 scan_requests(osd, skipped_map, was_full, true, need_resend,
3722 need_resend_linger);
3723 if (!ceph_osd_is_up(osdc->osdmap, osd->o_osd) ||
3724 memcmp(&osd->o_con.peer_addr,
3725 ceph_osd_addr(osdc->osdmap, osd->o_osd),
3726 sizeof(struct ceph_entity_addr)))
3733 static void kick_requests(struct ceph_osd_client *osdc,
3734 struct rb_root *need_resend,
3735 struct list_head *need_resend_linger)
3737 struct ceph_osd_linger_request *lreq, *nlreq;
3738 enum calc_target_result ct_res;
3741 /* make sure need_resend targets reflect latest map */
3742 for (n = rb_first(need_resend); n; ) {
3743 struct ceph_osd_request *req =
3744 rb_entry(n, struct ceph_osd_request, r_node);
3748 if (req->r_t.epoch < osdc->osdmap->epoch) {
3749 ct_res = calc_target(osdc, &req->r_t, NULL, false);
3750 if (ct_res == CALC_TARGET_POOL_DNE) {
3751 erase_request(need_resend, req);
3752 check_pool_dne(req);
3757 for (n = rb_first(need_resend); n; ) {
3758 struct ceph_osd_request *req =
3759 rb_entry(n, struct ceph_osd_request, r_node);
3760 struct ceph_osd *osd;
3763 erase_request(need_resend, req); /* before link_request() */
3765 osd = lookup_create_osd(osdc, req->r_t.osd, true);
3766 link_request(osd, req);
3767 if (!req->r_linger) {
3768 if (!osd_homeless(osd) && !req->r_t.paused)
3771 cancel_linger_request(req);
3775 list_for_each_entry_safe(lreq, nlreq, need_resend_linger, scan_item) {
3776 if (!osd_homeless(lreq->osd))
3779 list_del_init(&lreq->scan_item);
3784 * Process updated osd map.
3786 * The message contains any number of incremental and full maps, normally
3787 * indicating some sort of topology change in the cluster. Kick requests
3788 * off to different OSDs as needed.
3790 void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
3792 void *p = msg->front.iov_base;
3793 void *const end = p + msg->front.iov_len;
3794 u32 nr_maps, maplen;
3796 struct ceph_fsid fsid;
3797 struct rb_root need_resend = RB_ROOT;
3798 LIST_HEAD(need_resend_linger);
3799 bool handled_incremental = false;
3800 bool was_pauserd, was_pausewr;
3801 bool pauserd, pausewr;
3804 dout("%s have %u\n", __func__, osdc->osdmap->epoch);
3805 down_write(&osdc->lock);
3808 ceph_decode_need(&p, end, sizeof(fsid), bad);
3809 ceph_decode_copy(&p, &fsid, sizeof(fsid));
3810 if (ceph_check_fsid(osdc->client, &fsid) < 0)
3813 was_pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
3814 was_pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
3815 ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
3816 have_pool_full(osdc);
3818 /* incremental maps */
3819 ceph_decode_32_safe(&p, end, nr_maps, bad);
3820 dout(" %d inc maps\n", nr_maps);
3821 while (nr_maps > 0) {
3822 ceph_decode_need(&p, end, 2*sizeof(u32), bad);
3823 epoch = ceph_decode_32(&p);
3824 maplen = ceph_decode_32(&p);
3825 ceph_decode_need(&p, end, maplen, bad);
3826 if (osdc->osdmap->epoch &&
3827 osdc->osdmap->epoch + 1 == epoch) {
3828 dout("applying incremental map %u len %d\n",
3830 err = handle_one_map(osdc, p, p + maplen, true,
3831 &need_resend, &need_resend_linger);
3834 handled_incremental = true;
3836 dout("ignoring incremental map %u len %d\n",
3842 if (handled_incremental)
	goto done;
3846 ceph_decode_32_safe(&p, end, nr_maps, bad);
3847 dout(" %d full maps\n", nr_maps);
3849 ceph_decode_need(&p, end, 2*sizeof(u32), bad);
3850 epoch = ceph_decode_32(&p);
3851 maplen = ceph_decode_32(&p);
3852 ceph_decode_need(&p, end, maplen, bad);
3854 dout("skipping non-latest full map %u len %d\n",
3856 } else if (osdc->osdmap->epoch >= epoch) {
3857 dout("skipping full map %u len %d, "
3858 "older than our %u\n", epoch, maplen,
3859 osdc->osdmap->epoch);
3861 dout("taking full map %u len %d\n", epoch, maplen);
3862 err = handle_one_map(osdc, p, p + maplen, false,
3863 &need_resend, &need_resend_linger);
3873 * subscribe to subsequent osdmap updates if full to ensure
3874 * we find out when we are no longer full and stop returning ENOSPC.
3877 pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
3878 pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
3879 ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
3880 have_pool_full(osdc);
3881 if (was_pauserd || was_pausewr || pauserd || pausewr ||
3882 osdc->osdmap->epoch < osdc->epoch_barrier)
3883 maybe_request_map(osdc);
3885 kick_requests(osdc, &need_resend, &need_resend_linger);
3887 ceph_osdc_abort_on_full(osdc);
3888 ceph_monc_got_map(&osdc->client->monc, CEPH_SUB_OSDMAP,
3889 osdc->osdmap->epoch);
3890 up_write(&osdc->lock);
3891 wake_up_all(&osdc->client->auth_wq);
3895 pr_err("osdc handle_map corrupt msg\n");
3897 up_write(&osdc->lock);
3901 * Resubmit requests pending on the given osd.
3903 static void kick_osd_requests(struct ceph_osd *osd)
3907 clear_backoffs(osd);
3909 for (n = rb_first(&osd->o_requests); n; ) {
3910 struct ceph_osd_request *req =
3911 rb_entry(n, struct ceph_osd_request, r_node);
3913 n = rb_next(n); /* cancel_linger_request() */
3915 if (!req->r_linger) {
3916 if (!req->r_t.paused)
3919 cancel_linger_request(req);
3922 for (n = rb_first(&osd->o_linger_requests); n; n = rb_next(n)) {
3923 struct ceph_osd_linger_request *lreq =
3924 rb_entry(n, struct ceph_osd_linger_request, node);
3931 * If the osd connection drops, we need to resubmit all requests.
3933 static void osd_fault(struct ceph_connection *con)
3935 struct ceph_osd *osd = con->private;
3936 struct ceph_osd_client *osdc = osd->o_osdc;
3938 dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
3940 down_write(&osdc->lock);
3941 if (!osd_registered(osd)) {
3942 dout("%s osd%d unknown\n", __func__, osd->o_osd);
3946 if (!reopen_osd(osd))
3947 kick_osd_requests(osd);
3948 maybe_request_map(osdc);
3951 up_write(&osdc->lock);
3954 struct MOSDBackoff {
3955 struct ceph_spg spgid;
3959 struct ceph_hobject_id *begin;
3960 struct ceph_hobject_id *end;
3963 static int decode_MOSDBackoff(const struct ceph_msg *msg, struct MOSDBackoff *m)
3965 void *p = msg->front.iov_base;
3966 void *const end = p + msg->front.iov_len;
3971 ret = ceph_start_decoding(&p, end, 1, "spg_t", &struct_v, &struct_len);
3975 ret = ceph_decode_pgid(&p, end, &m->spgid.pgid);
3979 ceph_decode_8_safe(&p, end, m->spgid.shard, e_inval);
3980 ceph_decode_32_safe(&p, end, m->map_epoch, e_inval);
3981 ceph_decode_8_safe(&p, end, m->op, e_inval);
3982 ceph_decode_64_safe(&p, end, m->id, e_inval);
3984 m->begin = kzalloc(sizeof(*m->begin), GFP_NOIO);
3988 ret = decode_hoid(&p, end, m->begin);
3990 free_hoid(m->begin);
3994 m->end = kzalloc(sizeof(*m->end), GFP_NOIO);
3996 free_hoid(m->begin);
4000 ret = decode_hoid(&p, end, m->end);
4002 free_hoid(m->begin);
4013 static struct ceph_msg *create_backoff_message(
4014 const struct ceph_osd_backoff *backoff,
4017 struct ceph_msg *msg;
4021 msg_size = CEPH_ENCODING_START_BLK_LEN +
4022 CEPH_PGID_ENCODING_LEN + 1; /* spgid */
4023 msg_size += 4 + 1 + 8; /* map_epoch, op, id */
4024 msg_size += CEPH_ENCODING_START_BLK_LEN +
4025 hoid_encoding_size(backoff->begin);
4026 msg_size += CEPH_ENCODING_START_BLK_LEN +
4027 hoid_encoding_size(backoff->end);
4029 msg = ceph_msg_new(CEPH_MSG_OSD_BACKOFF, msg_size, GFP_NOIO, true);
4033 p = msg->front.iov_base;
4034 end = p + msg->front_alloc_len;
4036 encode_spgid(&p, &backoff->spgid);
4037 ceph_encode_32(&p, map_epoch);
4038 ceph_encode_8(&p, CEPH_OSD_BACKOFF_OP_ACK_BLOCK);
4039 ceph_encode_64(&p, backoff->id);
4040 encode_hoid(&p, end, backoff->begin);
4041 encode_hoid(&p, end, backoff->end);
4044 msg->front.iov_len = p - msg->front.iov_base;
4045 msg->hdr.version = cpu_to_le16(1); /* MOSDBackoff v1 */
4046 msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
4051 static void handle_backoff_block(struct ceph_osd *osd, struct MOSDBackoff *m)
4053 struct ceph_spg_mapping *spg;
4054 struct ceph_osd_backoff *backoff;
4055 struct ceph_msg *msg;
4057 dout("%s osd%d spgid %llu.%xs%d id %llu\n", __func__, osd->o_osd,
4058 m->spgid.pgid.pool, m->spgid.pgid.seed, m->spgid.shard, m->id);
4060 spg = lookup_spg_mapping(&osd->o_backoff_mappings, &m->spgid);
4062 spg = alloc_spg_mapping();
4064 pr_err("%s failed to allocate spg\n", __func__);
4067 spg->spgid = m->spgid; /* struct */
4068 insert_spg_mapping(&osd->o_backoff_mappings, spg);
4071 backoff = alloc_backoff();
4073 pr_err("%s failed to allocate backoff\n", __func__);
4076 backoff->spgid = m->spgid; /* struct */
4077 backoff->id = m->id;
4078 backoff->begin = m->begin;
4079 m->begin = NULL; /* backoff now owns this */
4080 backoff->end = m->end;
4081 m->end = NULL; /* ditto */
4083 insert_backoff(&spg->backoffs, backoff);
4084 insert_backoff_by_id(&osd->o_backoffs_by_id, backoff);
4087 * Ack with original backoff's epoch so that the OSD can
4088 * discard this if there was a PG split.
4090 msg = create_backoff_message(backoff, m->map_epoch);
4092 pr_err("%s failed to allocate msg\n", __func__);
4095 ceph_con_send(&osd->o_con, msg);
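/*
 * Check whether a target falls within a backoff range: @begin is
 * inclusive, @end is exclusive.
 */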
4098 static bool target_contained_by(const struct ceph_osd_request_target *t,
4099 const struct ceph_hobject_id *begin,
4100 const struct ceph_hobject_id *end)
4102 struct ceph_hobject_id hoid;
4105 hoid_fill_from_target(&hoid, t);
4106 cmp = hoid_compare(&hoid, begin);
4107 return !cmp || (cmp > 0 && hoid_compare(&hoid, end) < 0);
4110 static void handle_backoff_unblock(struct ceph_osd *osd,
4111 const struct MOSDBackoff *m)
4113 struct ceph_spg_mapping *spg;
4114 struct ceph_osd_backoff *backoff;
4117 dout("%s osd%d spgid %llu.%xs%d id %llu\n", __func__, osd->o_osd,
4118 m->spgid.pgid.pool, m->spgid.pgid.seed, m->spgid.shard, m->id);
4120 backoff = lookup_backoff_by_id(&osd->o_backoffs_by_id, m->id);
4122 pr_err("%s osd%d spgid %llu.%xs%d id %llu backoff dne\n",
4123 __func__, osd->o_osd, m->spgid.pgid.pool,
4124 m->spgid.pgid.seed, m->spgid.shard, m->id);
4128 if (hoid_compare(backoff->begin, m->begin) &&
4129 hoid_compare(backoff->end, m->end)) {
4130 pr_err("%s osd%d spgid %llu.%xs%d id %llu bad range?\n",
4131 __func__, osd->o_osd, m->spgid.pgid.pool,
4132 m->spgid.pgid.seed, m->spgid.shard, m->id);
4133 /* unblock it anyway... */
4136 spg = lookup_spg_mapping(&osd->o_backoff_mappings, &backoff->spgid);
4139 erase_backoff(&spg->backoffs, backoff);
4140 erase_backoff_by_id(&osd->o_backoffs_by_id, backoff);
4141 free_backoff(backoff);
4143 if (RB_EMPTY_ROOT(&spg->backoffs)) {
4144 erase_spg_mapping(&osd->o_backoff_mappings, spg);
4145 free_spg_mapping(spg);
4148 for (n = rb_first(&osd->o_requests); n; n = rb_next(n)) {
4149 struct ceph_osd_request *req =
4150 rb_entry(n, struct ceph_osd_request, r_node);
4152 if (!ceph_spg_compare(&req->r_t.spgid, &m->spgid)) {
4154 * Match against @m, not @backoff -- the PG may
4155 * have split on the OSD.
4157 if (target_contained_by(&req->r_t, m->begin, m->end)) {
4159 * If no other installed backoff applies, resend.
send_request(req);
4168 static void handle_backoff(struct ceph_osd *osd, struct ceph_msg *msg)
4170 struct ceph_osd_client *osdc = osd->o_osdc;
4171 struct MOSDBackoff m;
4174 down_read(&osdc->lock);
4175 if (!osd_registered(osd)) {
4176 dout("%s osd%d unknown\n", __func__, osd->o_osd);
4177 up_read(&osdc->lock);
4180 WARN_ON(osd->o_osd != le64_to_cpu(msg->hdr.src.num));
4182 mutex_lock(&osd->lock);
4183 ret = decode_MOSDBackoff(msg, &m);
4185 pr_err("failed to decode MOSDBackoff: %d\n", ret);
4191 case CEPH_OSD_BACKOFF_OP_BLOCK:
4192 handle_backoff_block(osd, &m);
4194 case CEPH_OSD_BACKOFF_OP_UNBLOCK:
4195 handle_backoff_unblock(osd, &m);
4198 pr_err("%s osd%d unknown op %d\n", __func__, osd->o_osd, m.op);
4205 mutex_unlock(&osd->lock);
4206 up_read(&osdc->lock);
4210 * Process osd watch notifications
4212 static void handle_watch_notify(struct ceph_osd_client *osdc,
4213 struct ceph_msg *msg)
4215 void *p = msg->front.iov_base;
4216 void *const end = p + msg->front.iov_len;
4217 struct ceph_osd_linger_request *lreq;
4218 struct linger_work *lwork;
4219 u8 proto_ver, opcode;
4220 u64 cookie, notify_id;
4221 u64 notifier_id = 0;
4222 s32 return_code = 0;
4223 void *payload = NULL;
4224 u32 payload_len = 0;
4226 ceph_decode_8_safe(&p, end, proto_ver, bad);
4227 ceph_decode_8_safe(&p, end, opcode, bad);
4228 ceph_decode_64_safe(&p, end, cookie, bad);
4229 p += 8; /* skip ver */
4230 ceph_decode_64_safe(&p, end, notify_id, bad);
4232 if (proto_ver >= 1) {
4233 ceph_decode_32_safe(&p, end, payload_len, bad);
4234 ceph_decode_need(&p, end, payload_len, bad);
4239 if (le16_to_cpu(msg->hdr.version) >= 2)
4240 ceph_decode_32_safe(&p, end, return_code, bad);
4242 if (le16_to_cpu(msg->hdr.version) >= 3)
4243 ceph_decode_64_safe(&p, end, notifier_id, bad);
4245 down_read(&osdc->lock);
4246 lreq = lookup_linger_osdc(&osdc->linger_requests, cookie);
4248 dout("%s opcode %d cookie %llu dne\n", __func__, opcode,
4250 goto out_unlock_osdc;
4253 mutex_lock(&lreq->lock);
4254 dout("%s opcode %d cookie %llu lreq %p is_watch %d\n", __func__,
4255 opcode, cookie, lreq, lreq->is_watch);
4256 if (opcode == CEPH_WATCH_EVENT_DISCONNECT) {
4257 if (!lreq->last_error) {
4258 lreq->last_error = -ENOTCONN;
4259 queue_watch_error(lreq);
4261 } else if (!lreq->is_watch) {
4262 /* CEPH_WATCH_EVENT_NOTIFY_COMPLETE */
4263 if (lreq->notify_id && lreq->notify_id != notify_id) {
4264 dout("lreq %p notify_id %llu != %llu, ignoring\n", lreq,
4265 lreq->notify_id, notify_id);
4266 } else if (!completion_done(&lreq->notify_finish_wait)) {
4267 struct ceph_msg_data *data =
4268 list_first_entry_or_null(&msg->data,
4269 struct ceph_msg_data,
4273 if (lreq->preply_pages) {
4274 WARN_ON(data->type !=
4275 CEPH_MSG_DATA_PAGES);
4276 *lreq->preply_pages = data->pages;
4277 *lreq->preply_len = data->length;
4279 ceph_release_page_vector(data->pages,
4280 calc_pages_for(0, data->length));
4283 lreq->notify_finish_error = return_code;
4284 complete_all(&lreq->notify_finish_wait);
4287 /* CEPH_WATCH_EVENT_NOTIFY */
4288 lwork = lwork_alloc(lreq, do_watch_notify);
4290 pr_err("failed to allocate notify-lwork\n");
4291 goto out_unlock_lreq;
4294 lwork->notify.notify_id = notify_id;
4295 lwork->notify.notifier_id = notifier_id;
4296 lwork->notify.payload = payload;
4297 lwork->notify.payload_len = payload_len;
4298 lwork->notify.msg = ceph_msg_get(msg);
4303 mutex_unlock(&lreq->lock);
4305 up_read(&osdc->lock);
4309 pr_err("osdc handle_watch_notify corrupt msg\n");
4313 * Register request, send initial attempt.
4315 int ceph_osdc_start_request(struct ceph_osd_client *osdc,
4316 struct ceph_osd_request *req,
4319 down_read(&osdc->lock);
4320 submit_request(req, false);
4321 up_read(&osdc->lock);
4325 EXPORT_SYMBOL(ceph_osdc_start_request);
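/*
 * Illustrative call sequence (a sketch, not from this file; error
 * handling omitted, and the READ flag is just an example -- any
 * osd_req_op_*_init() helper sets up r_ops[] the same way):
 *
 *	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
 *	ceph_oid_copy(&req->r_base_oid, oid);
 *	ceph_oloc_copy(&req->r_base_oloc, oloc);
 *	req->r_flags = CEPH_OSD_FLAG_READ;
 *	(set up r_ops[0] with an osd_req_op_*_init() helper)
 *	ceph_osdc_alloc_messages(req, GFP_NOIO);
 *	ceph_osdc_start_request(osdc, req, false);
 *	ret = ceph_osdc_wait_request(osdc, req);
 *	ceph_osdc_put_request(req);
 */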
4328 * Unregister a registered request. The request is not completed:
4329 * ->r_result isn't set and __complete_request() isn't called.
4331 void ceph_osdc_cancel_request(struct ceph_osd_request *req)
4333 struct ceph_osd_client *osdc = req->r_osdc;
4335 down_write(&osdc->lock);
4337 cancel_request(req);
4338 up_write(&osdc->lock);
4340 EXPORT_SYMBOL(ceph_osdc_cancel_request);
4343 * @timeout: in jiffies, 0 means "wait forever"
4345 static int wait_request_timeout(struct ceph_osd_request *req,
4346 unsigned long timeout)
4350 dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
4351 left = wait_for_completion_killable_timeout(&req->r_completion,
4352 ceph_timeout_jiffies(timeout));
4354 left = left ?: -ETIMEDOUT;
4355 ceph_osdc_cancel_request(req);
4357 left = req->r_result; /* completed */
4364 * wait for a request to complete
4366 int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
4367 struct ceph_osd_request *req)
4369 return wait_request_timeout(req, 0);
4371 EXPORT_SYMBOL(ceph_osdc_wait_request);
4374 * sync - wait for all in-flight requests to flush. avoid starvation.
4376 void ceph_osdc_sync(struct ceph_osd_client *osdc)
4378 struct rb_node *n, *p;
4379 u64 last_tid = atomic64_read(&osdc->last_tid);
4382 down_read(&osdc->lock);
4383 for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
4384 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
4386 mutex_lock(&osd->lock);
4387 for (p = rb_first(&osd->o_requests); p; p = rb_next(p)) {
4388 struct ceph_osd_request *req =
4389 rb_entry(p, struct ceph_osd_request, r_node);
4391 if (req->r_tid > last_tid)
4394 if (!(req->r_flags & CEPH_OSD_FLAG_WRITE))
4397 ceph_osdc_get_request(req);
4398 mutex_unlock(&osd->lock);
4399 up_read(&osdc->lock);
4400 dout("%s waiting on req %p tid %llu last_tid %llu\n",
4401 __func__, req, req->r_tid, last_tid);
4402 wait_for_completion(&req->r_completion);
4403 ceph_osdc_put_request(req);
4407 mutex_unlock(&osd->lock);
4410 up_read(&osdc->lock);
4411 dout("%s done last_tid %llu\n", __func__, last_tid);
4413 EXPORT_SYMBOL(ceph_osdc_sync);
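/*
 * Note that only writes with a tid at or below the last_tid snapshot
 * are waited for, so requests submitted after ceph_osdc_sync() begins
 * cannot starve it.
 */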
4415 static struct ceph_osd_request *
4416 alloc_linger_request(struct ceph_osd_linger_request *lreq)
4418 struct ceph_osd_request *req;
4420 req = ceph_osdc_alloc_request(lreq->osdc, NULL, 1, false, GFP_NOIO);
4424 ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
4425 ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
4427 if (ceph_osdc_alloc_messages(req, GFP_NOIO)) {
4428 ceph_osdc_put_request(req);
4436 * Returns a handle, caller owns a ref.
4438 struct ceph_osd_linger_request *
4439 ceph_osdc_watch(struct ceph_osd_client *osdc,
4440 struct ceph_object_id *oid,
4441 struct ceph_object_locator *oloc,
4442 rados_watchcb2_t wcb,
4443 rados_watcherrcb_t errcb,
4446 struct ceph_osd_linger_request *lreq;
4449 lreq = linger_alloc(osdc);
4451 return ERR_PTR(-ENOMEM);
4453 lreq->is_watch = true;
4455 lreq->errcb = errcb;
4457 lreq->watch_valid_thru = jiffies;
4459 ceph_oid_copy(&lreq->t.base_oid, oid);
4460 ceph_oloc_copy(&lreq->t.base_oloc, oloc);
4461 lreq->t.flags = CEPH_OSD_FLAG_WRITE;
4462 ktime_get_real_ts(&lreq->mtime);
4464 lreq->reg_req = alloc_linger_request(lreq);
4465 if (!lreq->reg_req) {
4470 lreq->ping_req = alloc_linger_request(lreq);
4471 if (!lreq->ping_req) {
4476 down_write(&osdc->lock);
4477 linger_register(lreq); /* before osd_req_op_* */
4478 osd_req_op_watch_init(lreq->reg_req, 0, lreq->linger_id,
4479 CEPH_OSD_WATCH_OP_WATCH);
4480 osd_req_op_watch_init(lreq->ping_req, 0, lreq->linger_id,
4481 CEPH_OSD_WATCH_OP_PING);
4482 linger_submit(lreq);
4483 up_write(&osdc->lock);
4485 ret = linger_reg_commit_wait(lreq);
4487 linger_cancel(lreq);
4495 return ERR_PTR(ret);
4497 EXPORT_SYMBOL(ceph_osdc_watch);
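/*
 * Sketch of a watch lifecycle (illustrative; my_wcb and my_errcb are
 * hypothetical callbacks with the rados_watchcb2_t and
 * rados_watcherrcb_t prototypes):
 *
 *	handle = ceph_osdc_watch(osdc, oid, oloc, my_wcb, my_errcb, arg);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	...
 *	ceph_osdc_watch_check(osdc, handle);	(optional liveness probe)
 *	ceph_osdc_unwatch(osdc, handle);
 */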
4502 * Times out after mount_timeout to preserve rbd unmap behaviour
4503 * introduced in 2894e1d76974 ("rbd: timeout watch teardown on unmap
4504 * with mount_timeout").
4506 int ceph_osdc_unwatch(struct ceph_osd_client *osdc,
4507 struct ceph_osd_linger_request *lreq)
4509 struct ceph_options *opts = osdc->client->options;
4510 struct ceph_osd_request *req;
4513 req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
4517 ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
4518 ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
4519 req->r_flags = CEPH_OSD_FLAG_WRITE;
4520 ktime_get_real_ts(&req->r_mtime);
4521 osd_req_op_watch_init(req, 0, lreq->linger_id,
4522 CEPH_OSD_WATCH_OP_UNWATCH);
4524 ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
4528 ceph_osdc_start_request(osdc, req, false);
4529 linger_cancel(lreq);
4531 ret = wait_request_timeout(req, opts->mount_timeout);
4534 ceph_osdc_put_request(req);
4537 EXPORT_SYMBOL(ceph_osdc_unwatch);
4539 static int osd_req_op_notify_ack_init(struct ceph_osd_request *req, int which,
4540 u64 notify_id, u64 cookie, void *payload,
4543 struct ceph_osd_req_op *op;
4544 struct ceph_pagelist *pl;
4547 op = _osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY_ACK, 0);
4549 pl = kmalloc(sizeof(*pl), GFP_NOIO);
4553 ceph_pagelist_init(pl);
4554 ret = ceph_pagelist_encode_64(pl, notify_id);
4555 ret |= ceph_pagelist_encode_64(pl, cookie);
4557 ret |= ceph_pagelist_encode_32(pl, payload_len);
4558 ret |= ceph_pagelist_append(pl, payload, payload_len);
4560 ret |= ceph_pagelist_encode_32(pl, 0);
4563 ceph_pagelist_release(pl);
4567 ceph_osd_data_pagelist_init(&op->notify_ack.request_data, pl);
4568 op->indata_len = pl->length;
int ceph_osdc_notify_ack(struct ceph_osd_client *osdc,
			 struct ceph_object_id *oid,
			 struct ceph_object_locator *oloc,
			 u64 notify_id,
			 u64 cookie,
			 void *payload,
			 size_t payload_len)
{
	struct ceph_osd_request *req;
	int ret;

	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
	if (!req)
		return -ENOMEM;

	ceph_oid_copy(&req->r_base_oid, oid);
	ceph_oloc_copy(&req->r_base_oloc, oloc);
	req->r_flags = CEPH_OSD_FLAG_READ;

	ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
	if (ret)
		goto out_put_req;

	ret = osd_req_op_notify_ack_init(req, 0, notify_id, cookie, payload,
					 payload_len);
	if (ret)
		goto out_put_req;

	ceph_osdc_start_request(osdc, req, false);
	ret = ceph_osdc_wait_request(osdc, req);

out_put_req:
	ceph_osdc_put_request(req);
	return ret;
}
EXPORT_SYMBOL(ceph_osdc_notify_ack);

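/*
 * Illustrative sketch: acking a notify from inside a watch callback.
 * The object identity would typically be recovered from the private
 * data passed to ceph_osdc_watch(); my_dev is hypothetical.
 *
 *	static void my_watch_cb(void *arg, u64 notify_id, u64 cookie,
 *				u64 notifier_id, void *data, size_t data_len)
 *	{
 *		struct my_dev *dev = arg;
 *
 *		ceph_osdc_notify_ack(dev->osdc, &dev->oid, &dev->oloc,
 *				     notify_id, cookie, NULL, 0);
 *	}
 *
 * A NULL payload is encoded as a zero-length reply, per the pagelist
 * encoding in osd_req_op_notify_ack_init().
 */
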
static int osd_req_op_notify_init(struct ceph_osd_request *req, int which,
				  u64 cookie, u32 prot_ver, u32 timeout,
				  void *payload, size_t payload_len)
{
	struct ceph_osd_req_op *op;
	struct ceph_pagelist *pl;
	int ret;

	op = _osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY, 0);
	op->notify.cookie = cookie;

	pl = kmalloc(sizeof(*pl), GFP_NOIO);
	if (!pl)
		return -ENOMEM;

	ceph_pagelist_init(pl);
	ret = ceph_pagelist_encode_32(pl, 1); /* prot_ver */
	ret |= ceph_pagelist_encode_32(pl, timeout);
	ret |= ceph_pagelist_encode_32(pl, payload_len);
	ret |= ceph_pagelist_append(pl, payload, payload_len);
	if (ret) {
		ceph_pagelist_release(pl);
		return -ENOMEM;
	}

	ceph_osd_data_pagelist_init(&op->notify.request_data, pl);
	op->indata_len = pl->length;
	return 0;
}

/*
 * @timeout: in seconds
 *
 * @preply_{pages,len} are initialized both on success and error.
 * The caller is responsible for:
 *
 *     ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len))
 */
int ceph_osdc_notify(struct ceph_osd_client *osdc,
		     struct ceph_object_id *oid,
		     struct ceph_object_locator *oloc,
		     void *payload,
		     size_t payload_len,
		     u32 timeout,
		     struct page ***preply_pages,
		     size_t *preply_len)
{
	struct ceph_osd_linger_request *lreq;
	struct page **pages;
	int ret;

	WARN_ON(!timeout);
	if (preply_pages) {
		*preply_pages = NULL;
		*preply_len = 0;
	}

	lreq = linger_alloc(osdc);
	if (!lreq)
		return -ENOMEM;

	lreq->preply_pages = preply_pages;
	lreq->preply_len = preply_len;

	ceph_oid_copy(&lreq->t.base_oid, oid);
	ceph_oloc_copy(&lreq->t.base_oloc, oloc);
	lreq->t.flags = CEPH_OSD_FLAG_READ;

	lreq->reg_req = alloc_linger_request(lreq);
	if (!lreq->reg_req) {
		ret = -ENOMEM;
		goto out_put_lreq;
	}

	/* for notify_id */
	pages = ceph_alloc_page_vector(1, GFP_NOIO);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out_put_lreq;
	}

	down_write(&osdc->lock);
	linger_register(lreq); /* before osd_req_op_* */
	ret = osd_req_op_notify_init(lreq->reg_req, 0, lreq->linger_id, 1,
				     timeout, payload, payload_len);
	if (ret) {
		linger_unregister(lreq);
		up_write(&osdc->lock);
		ceph_release_page_vector(pages, 1);
		goto out_put_lreq;
	}
	ceph_osd_data_pages_init(osd_req_op_data(lreq->reg_req, 0, notify,
						 response_data),
				 pages, PAGE_SIZE, 0, false, true);
	linger_submit(lreq);
	up_write(&osdc->lock);

	ret = linger_reg_commit_wait(lreq);
	if (!ret)
		ret = linger_notify_finish_wait(lreq);
	else
		dout("lreq %p failed to initiate notify %d\n", lreq, ret);

	linger_cancel(lreq);
out_put_lreq:
	linger_put(lreq);
	return ret;
}
EXPORT_SYMBOL(ceph_osdc_notify);

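/*
 * Example (sketch): broadcast a notify with a 10 second timeout and
 * release the reply buffer exactly as the comment above prescribes.
 *
 *	struct page **reply_pages;
 *	size_t reply_len;
 *
 *	ret = ceph_osdc_notify(osdc, &oid, &oloc, buf, buf_len, 10,
 *			       &reply_pages, &reply_len);
 *	...
 *	if (reply_pages)
 *		ceph_release_page_vector(reply_pages,
 *					 calc_pages_for(0, reply_len));
 */
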
/*
 * Return the number of milliseconds since the watch was last
 * confirmed, or an error.  If there is an error, the watch is no
 * longer valid, and should be destroyed with ceph_osdc_unwatch().
 */
int ceph_osdc_watch_check(struct ceph_osd_client *osdc,
			  struct ceph_osd_linger_request *lreq)
{
	unsigned long stamp, age;
	int ret;

	down_read(&osdc->lock);
	mutex_lock(&lreq->lock);
	stamp = lreq->watch_valid_thru;
	if (!list_empty(&lreq->pending_lworks)) {
		struct linger_work *lwork =
		    list_first_entry(&lreq->pending_lworks,
				     struct linger_work,
				     pending_item);

		if (time_before(lwork->queued_stamp, stamp))
			stamp = lwork->queued_stamp;
	}
	age = jiffies - stamp;
	dout("%s lreq %p linger_id %llu age %lu last_error %d\n", __func__,
	     lreq, lreq->linger_id, age, lreq->last_error);
	/* we are truncating to msecs, so return a safe upper bound */
	ret = lreq->last_error ?: 1 + jiffies_to_msecs(age);

	mutex_unlock(&lreq->lock);
	up_read(&osdc->lock);
	return ret;
}

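/*
 * Example (sketch): a periodic keepalive check, as a watch owner might
 * run from a delayed work item.
 *
 *	ret = ceph_osdc_watch_check(osdc, handle);
 *	if (ret < 0)
 *		...	watch is no longer valid, unwatch and re-watch
 *	else
 *		...	ret is an upper bound in ms since last confirmation
 */
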
static int decode_watcher(void **p, void *end, struct ceph_watch_item *item)
{
	u8 struct_v;
	u32 struct_len;
	int ret;

	ret = ceph_start_decoding(p, end, 2, "watch_item_t",
				  &struct_v, &struct_len);
	if (ret)
		return ret;

	ceph_decode_copy(p, &item->name, sizeof(item->name));
	item->cookie = ceph_decode_64(p);
	*p += 4; /* skip timeout_seconds */
	if (struct_v >= 2) {
		ceph_decode_copy(p, &item->addr, sizeof(item->addr));
		ceph_decode_addr(&item->addr);
	}

	dout("%s %s%llu cookie %llu addr %s\n", __func__,
	     ENTITY_NAME(item->name), item->cookie,
	     ceph_pr_addr(&item->addr.in_addr));
	return 0;
}

static int decode_watchers(void **p, void *end,
			   struct ceph_watch_item **watchers,
			   u32 *num_watchers)
{
	u8 struct_v;
	u32 struct_len;
	int i;
	int ret;

	ret = ceph_start_decoding(p, end, 1, "obj_list_watch_response_t",
				  &struct_v, &struct_len);
	if (ret)
		return ret;

	*num_watchers = ceph_decode_32(p);
	*watchers = kcalloc(*num_watchers, sizeof(**watchers), GFP_NOIO);
	if (!*watchers)
		return -ENOMEM;

	for (i = 0; i < *num_watchers; i++) {
		ret = decode_watcher(p, end, *watchers + i);
		if (ret) {
			kfree(*watchers);
			return ret;
		}
	}

	return 0;
}

/*
 * On success, the caller is responsible for:
 *
 *     kfree(watchers);
 */
int ceph_osdc_list_watchers(struct ceph_osd_client *osdc,
			    struct ceph_object_id *oid,
			    struct ceph_object_locator *oloc,
			    struct ceph_watch_item **watchers,
			    u32 *num_watchers)
{
	struct ceph_osd_request *req;
	struct page **pages;
	int ret;

	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
	if (!req)
		return -ENOMEM;

	ceph_oid_copy(&req->r_base_oid, oid);
	ceph_oloc_copy(&req->r_base_oloc, oloc);
	req->r_flags = CEPH_OSD_FLAG_READ;

	ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
	if (ret)
		goto out_put_req;

	pages = ceph_alloc_page_vector(1, GFP_NOIO);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out_put_req;
	}

	osd_req_op_init(req, 0, CEPH_OSD_OP_LIST_WATCHERS, 0);
	ceph_osd_data_pages_init(osd_req_op_data(req, 0, list_watchers,
						 response_data),
				 pages, PAGE_SIZE, 0, false, true);

	ceph_osdc_start_request(osdc, req, false);
	ret = ceph_osdc_wait_request(osdc, req);
	if (ret >= 0) {
		void *p = page_address(pages[0]);
		void *const end = p + req->r_ops[0].outdata_len;

		ret = decode_watchers(&p, end, watchers, num_watchers);
	}

out_put_req:
	ceph_osdc_put_request(req);
	return ret;
}
EXPORT_SYMBOL(ceph_osdc_list_watchers);

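/*
 * Example (sketch): list the watchers on an object.  On success the
 * array must be freed with kfree(), per the comment above.
 *
 *	struct ceph_watch_item *watchers;
 *	u32 num_watchers, i;
 *
 *	ret = ceph_osdc_list_watchers(osdc, &oid, &oloc, &watchers,
 *				      &num_watchers);
 *	if (ret)
 *		return ret;
 *	for (i = 0; i < num_watchers; i++)
 *		pr_info("watcher cookie %llu\n", watchers[i].cookie);
 *	kfree(watchers);
 */
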
/*
 * Call all pending notify callbacks - for use after a watch is
 * unregistered, to make sure no more callbacks for it will be invoked
 */
void ceph_osdc_flush_notifies(struct ceph_osd_client *osdc)
{
	dout("%s osdc %p\n", __func__, osdc);
	flush_workqueue(osdc->notify_wq);
}
EXPORT_SYMBOL(ceph_osdc_flush_notifies);

void ceph_osdc_maybe_request_map(struct ceph_osd_client *osdc)
{
	down_read(&osdc->lock);
	maybe_request_map(osdc);
	up_read(&osdc->lock);
}
EXPORT_SYMBOL(ceph_osdc_maybe_request_map);

/*
 * Execute an OSD class method on an object.
 *
 * @flags: CEPH_OSD_FLAG_*
 * @resp_len: in/out param for reply length
 */
int ceph_osdc_call(struct ceph_osd_client *osdc,
		   struct ceph_object_id *oid,
		   struct ceph_object_locator *oloc,
		   const char *class, const char *method,
		   unsigned int flags,
		   struct page *req_page, size_t req_len,
		   struct page *resp_page, size_t *resp_len)
{
	struct ceph_osd_request *req;
	int ret;

	if (req_len > PAGE_SIZE || (resp_page && *resp_len > PAGE_SIZE))
		return -E2BIG;

	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
	if (!req)
		return -ENOMEM;

	ceph_oid_copy(&req->r_base_oid, oid);
	ceph_oloc_copy(&req->r_base_oloc, oloc);
	req->r_flags = flags;

	ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
	if (ret)
		goto out_put_req;

	osd_req_op_cls_init(req, 0, CEPH_OSD_OP_CALL, class, method);
	if (req_page)
		osd_req_op_cls_request_data_pages(req, 0, &req_page, req_len,
						  0, false, false);
	if (resp_page)
		osd_req_op_cls_response_data_pages(req, 0, &resp_page,
						   *resp_len, 0, false, false);

	ceph_osdc_start_request(osdc, req, false);
	ret = ceph_osdc_wait_request(osdc, req);
	if (ret >= 0) {
		ret = req->r_ops[0].rval;
		if (resp_page)
			*resp_len = req->r_ops[0].outdata_len;
	}

out_put_req:
	ceph_osdc_put_request(req);
	return ret;
}
EXPORT_SYMBOL(ceph_osdc_call);

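/*
 * Example (sketch): invoke a class method with a one-page reply
 * buffer.  "lock"/"get_info" stand in for a real class/method pair and
 * reply_page is a page the caller allocated; both are assumptions of
 * this sketch.
 *
 *	size_t reply_len = PAGE_SIZE;
 *
 *	ret = ceph_osdc_call(osdc, &oid, &oloc, "lock", "get_info",
 *			     CEPH_OSD_FLAG_READ, NULL, 0,
 *			     reply_page, &reply_len);
 *	if (!ret)
 *		...	reply_len bytes of output are in reply_page
 */
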
/*
 * init, shutdown
 */
int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
{
	int err;

	dout("init\n");
	osdc->client = client;
	init_rwsem(&osdc->lock);
	osdc->osds = RB_ROOT;
	INIT_LIST_HEAD(&osdc->osd_lru);
	spin_lock_init(&osdc->osd_lru_lock);
	osd_init(&osdc->homeless_osd);
	osdc->homeless_osd.o_osdc = osdc;
	osdc->homeless_osd.o_osd = CEPH_HOMELESS_OSD;
	osdc->last_linger_id = CEPH_LINGER_ID_START;
	osdc->linger_requests = RB_ROOT;
	osdc->map_checks = RB_ROOT;
	osdc->linger_map_checks = RB_ROOT;
	INIT_DELAYED_WORK(&osdc->timeout_work, handle_timeout);
	INIT_DELAYED_WORK(&osdc->osds_timeout_work, handle_osds_timeout);

	err = -ENOMEM;
	osdc->osdmap = ceph_osdmap_alloc();
	if (!osdc->osdmap)
		goto out;

	osdc->req_mempool = mempool_create_slab_pool(10,
						     ceph_osd_request_cache);
	if (!osdc->req_mempool)
		goto out_map;

	err = ceph_msgpool_init(&osdc->msgpool_op, CEPH_MSG_OSD_OP,
				PAGE_SIZE, 10, true, "osd_op");
	if (err < 0)
		goto out_mempool;
	err = ceph_msgpool_init(&osdc->msgpool_op_reply, CEPH_MSG_OSD_OPREPLY,
				PAGE_SIZE, 10, true, "osd_op_reply");
	if (err < 0)
		goto out_msgpool;

	err = -ENOMEM;
	osdc->notify_wq = create_singlethread_workqueue("ceph-watch-notify");
	if (!osdc->notify_wq)
		goto out_msgpool_reply;

	schedule_delayed_work(&osdc->timeout_work,
			      osdc->client->options->osd_keepalive_timeout);
	schedule_delayed_work(&osdc->osds_timeout_work,
	    round_jiffies_relative(osdc->client->options->osd_idle_ttl));

	return 0;

out_msgpool_reply:
	ceph_msgpool_destroy(&osdc->msgpool_op_reply);
out_msgpool:
	ceph_msgpool_destroy(&osdc->msgpool_op);
out_mempool:
	mempool_destroy(osdc->req_mempool);
out_map:
	ceph_osdmap_destroy(osdc->osdmap);
out:
	return err;
}

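/*
 * Sketch of the lifecycle pairing (illustrative): in-tree these are
 * driven by ceph_create_client() and ceph_destroy_client() rather
 * than called directly.
 *
 *	err = ceph_osdc_init(osdc, client);
 *	if (err)
 *		return err;
 *	...
 *	ceph_osdc_stop(osdc);
 */
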
void ceph_osdc_stop(struct ceph_osd_client *osdc)
{
	flush_workqueue(osdc->notify_wq);
	destroy_workqueue(osdc->notify_wq);
	cancel_delayed_work_sync(&osdc->timeout_work);
	cancel_delayed_work_sync(&osdc->osds_timeout_work);

	down_write(&osdc->lock);
	while (!RB_EMPTY_ROOT(&osdc->osds)) {
		struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds),
						struct ceph_osd, o_node);

		close_osd(osd);
	}
	up_write(&osdc->lock);
	WARN_ON(refcount_read(&osdc->homeless_osd.o_ref) != 1);
	osd_cleanup(&osdc->homeless_osd);

	WARN_ON(!list_empty(&osdc->osd_lru));
	WARN_ON(!RB_EMPTY_ROOT(&osdc->linger_requests));
	WARN_ON(!RB_EMPTY_ROOT(&osdc->map_checks));
	WARN_ON(!RB_EMPTY_ROOT(&osdc->linger_map_checks));
	WARN_ON(atomic_read(&osdc->num_requests));
	WARN_ON(atomic_read(&osdc->num_homeless));

	ceph_osdmap_destroy(osdc->osdmap);
	mempool_destroy(osdc->req_mempool);
	ceph_msgpool_destroy(&osdc->msgpool_op);
	ceph_msgpool_destroy(&osdc->msgpool_op_reply);
}

/*
 * Read some contiguous pages.  If we cross a stripe boundary, shorten
 * *plen.  Return number of bytes read, or error.
 */
int ceph_osdc_readpages(struct ceph_osd_client *osdc,
			struct ceph_vino vino, struct ceph_file_layout *layout,
			u64 off, u64 *plen,
			u32 truncate_seq, u64 truncate_size,
			struct page **pages, int num_pages, int page_align)
{
	struct ceph_osd_request *req;
	int rc = 0;

	dout("readpages on ino %llx.%llx on %llu~%llu\n", vino.ino,
	     vino.snap, off, *plen);
	req = ceph_osdc_new_request(osdc, layout, vino, off, plen, 0, 1,
				    CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
				    NULL, truncate_seq, truncate_size,
				    false);
	if (IS_ERR(req))
		return PTR_ERR(req);

	/* it may be a short read due to an object boundary */
	osd_req_op_extent_osd_data_pages(req, 0,
				pages, *plen, page_align, false, false);

	dout("readpages final extent is %llu~%llu (%llu bytes align %d)\n",
	     off, *plen, *plen, page_align);

	rc = ceph_osdc_start_request(osdc, req, false);
	if (!rc)
		rc = ceph_osdc_wait_request(osdc, req);

	ceph_osdc_put_request(req);
	dout("readpages result %d\n", rc);
	return rc;
}
EXPORT_SYMBOL(ceph_osdc_readpages);

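/*
 * Example (sketch): synchronous read of up to num_pages pages.  Note
 * that *plen may come back shortened if the extent crosses an object
 * boundary, so the caller must look at the byte count returned.
 *
 *	u64 len = (u64)num_pages << PAGE_SHIFT;
 *
 *	rc = ceph_osdc_readpages(osdc, vino, layout, off, &len,
 *				 truncate_seq, truncate_size,
 *				 pages, num_pages, 0);
 *	if (rc >= 0)
 *		...	rc bytes were read into pages
 */
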
/*
 * do a synchronous write on N pages
 */
int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino,
			 struct ceph_file_layout *layout,
			 struct ceph_snap_context *snapc,
			 u64 off, u64 len,
			 u32 truncate_seq, u64 truncate_size,
			 struct timespec *mtime,
			 struct page **pages, int num_pages)
{
	struct ceph_osd_request *req;
	int rc = 0;
	int page_align = off & ~PAGE_MASK;

	req = ceph_osdc_new_request(osdc, layout, vino, off, &len, 0, 1,
				    CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE,
				    snapc, truncate_seq, truncate_size,
				    true);
	if (IS_ERR(req))
		return PTR_ERR(req);

	/* it may be a short write due to an object boundary */
	osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_align,
				false, false);
	dout("writepages %llu~%llu (%llu bytes)\n", off, len, len);

	req->r_mtime = *mtime;
	rc = ceph_osdc_start_request(osdc, req, true);
	if (!rc)
		rc = ceph_osdc_wait_request(osdc, req);

	ceph_osdc_put_request(req);
	if (rc == 0)
		rc = len;
	dout("writepages result %d\n", rc);
	return rc;
}
EXPORT_SYMBOL(ceph_osdc_writepages);

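/*
 * Example (sketch): the mirror of the read path above.  On success the
 * full length is returned (rc == len above), so callers typically
 * treat anything non-negative as "all bytes written".
 *
 *	rc = ceph_osdc_writepages(osdc, vino, layout, snapc, off, len,
 *				  truncate_seq, truncate_size, &mtime,
 *				  pages, num_pages);
 *	if (rc < 0)
 *		return rc;
 */
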
int __init ceph_osdc_setup(void)
{
	size_t size = sizeof(struct ceph_osd_request) +
	    CEPH_OSD_SLAB_OPS * sizeof(struct ceph_osd_req_op);

	BUG_ON(ceph_osd_request_cache);
	ceph_osd_request_cache = kmem_cache_create("ceph_osd_request", size,
						   0, 0, NULL);

	return ceph_osd_request_cache ? 0 : -ENOMEM;
}

void ceph_osdc_cleanup(void)
{
	BUG_ON(!ceph_osd_request_cache);
	kmem_cache_destroy(ceph_osd_request_cache);
	ceph_osd_request_cache = NULL;
}

/*
 * handle incoming message
 */
static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
{
	struct ceph_osd *osd = con->private;
	struct ceph_osd_client *osdc = osd->o_osdc;
	int type = le16_to_cpu(msg->hdr.type);

	switch (type) {
	case CEPH_MSG_OSD_MAP:
		ceph_osdc_handle_map(osdc, msg);
		break;
	case CEPH_MSG_OSD_OPREPLY:
		handle_reply(osd, msg);
		break;
	case CEPH_MSG_OSD_BACKOFF:
		handle_backoff(osd, msg);
		break;
	case CEPH_MSG_WATCH_NOTIFY:
		handle_watch_notify(osdc, msg);
		break;

	default:
		pr_err("received unknown message type %d %s\n", type,
		       ceph_msg_type_name(type));
	}

	ceph_msg_put(msg);
}

/*
 * Lookup and return message for incoming reply.  Don't try to do
 * anything about a larger than preallocated data portion of the
 * message at the moment - for now, just skip the message.
 */
static struct ceph_msg *get_reply(struct ceph_connection *con,
				  struct ceph_msg_header *hdr,
				  int *skip)
{
	struct ceph_osd *osd = con->private;
	struct ceph_osd_client *osdc = osd->o_osdc;
	struct ceph_msg *m = NULL;
	struct ceph_osd_request *req;
	int front_len = le32_to_cpu(hdr->front_len);
	int data_len = le32_to_cpu(hdr->data_len);
	u64 tid = le64_to_cpu(hdr->tid);

	down_read(&osdc->lock);
	if (!osd_registered(osd)) {
		dout("%s osd%d unknown, skipping\n", __func__, osd->o_osd);
		*skip = 1;
		goto out_unlock_osdc;
	}
	WARN_ON(osd->o_osd != le64_to_cpu(hdr->src.num));

	mutex_lock(&osd->lock);
	req = lookup_request(&osd->o_requests, tid);
	if (!req) {
		dout("%s osd%d tid %llu unknown, skipping\n", __func__,
		     osd->o_osd, tid);
		*skip = 1;
		goto out_unlock_session;
	}

	ceph_msg_revoke_incoming(req->r_reply);

	if (front_len > req->r_reply->front_alloc_len) {
		pr_warn("%s osd%d tid %llu front %d > preallocated %d\n",
			__func__, osd->o_osd, req->r_tid, front_len,
			req->r_reply->front_alloc_len);
		m = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, front_len, GFP_NOFS,
				 false);
		if (!m)
			goto out_unlock_session;
		ceph_msg_put(req->r_reply);
		req->r_reply = m;
	}

	if (data_len > req->r_reply->data_length) {
		pr_warn("%s osd%d tid %llu data %d > preallocated %zu, skipping\n",
			__func__, osd->o_osd, req->r_tid, data_len,
			req->r_reply->data_length);
		m = NULL;
		*skip = 1;
		goto out_unlock_session;
	}

	m = ceph_msg_get(req->r_reply);
	dout("get_reply tid %lld %p\n", tid, m);

out_unlock_session:
	mutex_unlock(&osd->lock);
out_unlock_osdc:
	up_read(&osdc->lock);
	return m;
}

/*
 * TODO: switch to a msg-owned pagelist
 */
static struct ceph_msg *alloc_msg_with_page_vector(struct ceph_msg_header *hdr)
{
	struct ceph_msg *m;
	int type = le16_to_cpu(hdr->type);
	u32 front_len = le32_to_cpu(hdr->front_len);
	u32 data_len = le32_to_cpu(hdr->data_len);

	m = ceph_msg_new(type, front_len, GFP_NOIO, false);
	if (!m)
		return NULL;

	if (data_len) {
		struct page **pages;
		struct ceph_osd_data osd_data;

		pages = ceph_alloc_page_vector(calc_pages_for(0, data_len),
					       GFP_NOIO);
		if (IS_ERR(pages)) {
			ceph_msg_put(m);
			return NULL;
		}

		ceph_osd_data_pages_init(&osd_data, pages, data_len, 0, false,
					 false);
		ceph_osdc_msg_data_add(m, &osd_data);
	}

	return m;
}

static struct ceph_msg *alloc_msg(struct ceph_connection *con,
				  struct ceph_msg_header *hdr,
				  int *skip)
{
	struct ceph_osd *osd = con->private;
	int type = le16_to_cpu(hdr->type);

	*skip = 0;
	switch (type) {
	case CEPH_MSG_OSD_MAP:
	case CEPH_MSG_OSD_BACKOFF:
	case CEPH_MSG_WATCH_NOTIFY:
		return alloc_msg_with_page_vector(hdr);
	case CEPH_MSG_OSD_OPREPLY:
		return get_reply(con, hdr, skip);
	default:
		pr_warn("%s osd%d unknown msg type %d, skipping\n", __func__,
			osd->o_osd, type);
		*skip = 1;
		return NULL;
	}
}

/*
 * Wrappers to refcount containing ceph_osd struct
 */
static struct ceph_connection *get_osd_con(struct ceph_connection *con)
{
	struct ceph_osd *osd = con->private;
	if (get_osd(osd))
		return con;
	return NULL;
}

static void put_osd_con(struct ceph_connection *con)
{
	struct ceph_osd *osd = con->private;
	put_osd(osd);
}

/*
 * authentication
 */

/*
 * Note: returned pointer is the address of a structure that's
 * managed separately.  Caller must *not* attempt to free it.
 */
static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
						  int *proto, int force_new)
{
	struct ceph_osd *o = con->private;
	struct ceph_osd_client *osdc = o->o_osdc;
	struct ceph_auth_client *ac = osdc->client->monc.auth;
	struct ceph_auth_handshake *auth = &o->o_auth;

	if (force_new && auth->authorizer) {
		ceph_auth_destroy_authorizer(auth->authorizer);
		auth->authorizer = NULL;
	}
	if (!auth->authorizer) {
		int ret = ceph_auth_create_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
						      auth);
		if (ret)
			return ERR_PTR(ret);
	} else {
		int ret = ceph_auth_update_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
						      auth);
		if (ret)
			return ERR_PTR(ret);
	}
	*proto = ac->protocol;

	return auth;
}

static int verify_authorizer_reply(struct ceph_connection *con)
{
	struct ceph_osd *o = con->private;
	struct ceph_osd_client *osdc = o->o_osdc;
	struct ceph_auth_client *ac = osdc->client->monc.auth;

	return ceph_auth_verify_authorizer_reply(ac, o->o_auth.authorizer);
}

static int invalidate_authorizer(struct ceph_connection *con)
{
	struct ceph_osd *o = con->private;
	struct ceph_osd_client *osdc = o->o_osdc;
	struct ceph_auth_client *ac = osdc->client->monc.auth;

	ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_OSD);
	return ceph_monc_validate_auth(&osdc->client->monc);
}

static void osd_reencode_message(struct ceph_msg *msg)
{
	int type = le16_to_cpu(msg->hdr.type);

	if (type == CEPH_MSG_OSD_OP)
		encode_request_finish(msg);
}

static int osd_sign_message(struct ceph_msg *msg)
{
	struct ceph_osd *o = msg->con->private;
	struct ceph_auth_handshake *auth = &o->o_auth;

	return ceph_auth_sign_message(auth, msg);
}

static int osd_check_message_signature(struct ceph_msg *msg)
{
	struct ceph_osd *o = msg->con->private;
	struct ceph_auth_handshake *auth = &o->o_auth;

	return ceph_auth_check_message_signature(auth, msg);
}

static const struct ceph_connection_operations osd_con_ops = {
	.get = get_osd_con,
	.put = put_osd_con,
	.dispatch = dispatch,
	.get_authorizer = get_authorizer,
	.verify_authorizer_reply = verify_authorizer_reply,
	.invalidate_authorizer = invalidate_authorizer,
	.alloc_msg = alloc_msg,
	.reencode_message = osd_reencode_message,
	.sign_message = osd_sign_message,
	.check_message_signature = osd_check_message_signature,
	.fault = osd_fault,
};