/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 *
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */

#define DEBUG_SUBSYSTEM S_OSC

#include "../../include/linux/libcfs/libcfs.h"

#include "../include/lustre_dlm.h"
#include "../include/lustre_net.h"
#include "../include/lustre/lustre_user.h"
#include "../include/obd_cksum.h"

#include "../include/lustre_ha.h"
#include "../include/lprocfs_status.h"
#include "../include/lustre_debug.h"
#include "../include/lustre_param.h"
#include "../include/lustre_fid.h"
#include "../include/obd_class.h"
#include "../include/obd.h"
#include "osc_internal.h"
#include "osc_cl_internal.h"

atomic_t osc_pool_req_count;
unsigned int osc_reqpool_maxreqcount;
struct ptlrpc_request_pool *osc_rq_pool;

/* max memory used for request pool, unit is MB */
static unsigned int osc_reqpool_mem_max = 5;
module_param(osc_reqpool_mem_max, uint, 0444);
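
/*
 * A minimal sketch (an assumption, not code from this excerpt) of how the
 * pool limit could be derived from osc_reqpool_mem_max at setup time,
 * where OST_MAXREQSIZE bounds the size of a single pooled request:
 *
 *	unsigned long bytes = osc_reqpool_mem_max << 20;
 *	osc_reqpool_maxreqcount = bytes / OST_MAXREQSIZE;
 *
 * The 0444 permission above makes the parameter read-only at runtime.
 */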

struct osc_brw_async_args {
	struct obdo		 *aa_oa;
	int			  aa_requested_nob;
	int			  aa_nio_count;
	u32			  aa_page_count;
	int			  aa_resends;
	struct brw_page		**aa_ppga;
	struct client_obd	 *aa_cli;
	struct list_head	  aa_oaps;
	struct list_head	  aa_exts;
	struct cl_req		 *aa_clerq;
};

struct osc_async_args {
	struct obd_info *aa_oi;
};

struct osc_setattr_args {
	struct obdo		*sa_oa;
	obd_enqueue_update_f	 sa_upcall;
	void			*sa_cookie;
};

struct osc_fsync_args {
	struct obd_info		*fa_oi;
	obd_enqueue_update_f	 fa_upcall;
	void			*fa_cookie;
};

struct osc_enqueue_args {
	struct obd_export	 *oa_exp;
	__u64			 *oa_flags;
	obd_enqueue_update_f	  oa_upcall;
	void			 *oa_cookie;
	struct ost_lvb		 *oa_lvb;
	struct lustre_handle	 *oa_lockh;
	struct ldlm_enqueue_info *oa_ei;
	unsigned int		  oa_agl:1;
};

static void osc_release_ppga(struct brw_page **ppga, u32 count);
static int brw_interpret(const struct lu_env *env,
			 struct ptlrpc_request *req, void *data, int rc);
int osc_cleanup(struct obd_device *obd);

/* Pack OSC object metadata for disk storage (LE byte order). */
static int osc_packmd(struct obd_export *exp, struct lov_mds_md **lmmp,
		      struct lov_stripe_md *lsm)
	lmm_size = sizeof(**lmmp);

	if (*lmmp != NULL && lsm == NULL) {
	} else if (unlikely(lsm != NULL && ostid_id(&lsm->lsm_oi) == 0)) {
	*lmmp = kzalloc(lmm_size, GFP_NOFS);
	ostid_cpu_to_le(&lsm->lsm_oi, &(*lmmp)->lmm_oi);

/* Unpack OSC object metadata from disk storage (LE byte order). */
static int osc_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp,
			struct lov_mds_md *lmm, int lmm_bytes)
	struct obd_import *imp = class_exp2cliimp(exp);

	if (lmm_bytes < sizeof(*lmm)) {
		CERROR("%s: lov_mds_md too small: %d, need %d\n",
		       exp->exp_obd->obd_name, lmm_bytes,
	/* XXX LOV_MAGIC etc check? */
	if (unlikely(ostid_id(&lmm->lmm_oi) == 0)) {
		CERROR("%s: zero lmm_object_id: rc = %d\n",
		       exp->exp_obd->obd_name, -EINVAL);
	lsm_size = lov_stripe_md_size(1);

	if (*lsmp != NULL && lmm == NULL) {
		kfree((*lsmp)->lsm_oinfo[0]);
	*lsmp = kzalloc(lsm_size, GFP_NOFS);
	if (unlikely(*lsmp == NULL))
	(*lsmp)->lsm_oinfo[0] = kzalloc(sizeof(struct lov_oinfo),
	if (unlikely((*lsmp)->lsm_oinfo[0] == NULL)) {
	loi_init((*lsmp)->lsm_oinfo[0]);
	} else if (unlikely(ostid_id(&(*lsmp)->lsm_oi) == 0)) {
	/* XXX zero *lsmp? */
	ostid_le_to_cpu(&lmm->lmm_oi, &(*lsmp)->lsm_oi);
	    (imp->imp_connect_data.ocd_connect_flags & OBD_CONNECT_MAXBYTES))
		(*lsmp)->lsm_maxbytes = imp->imp_connect_data.ocd_maxbytes;
		(*lsmp)->lsm_maxbytes = LUSTRE_STRIPE_MAXBYTES;

static inline void osc_pack_req_body(struct ptlrpc_request *req,
				     struct obd_info *oinfo)
	struct ost_body *body;

	body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
	lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa,

static int osc_getattr_interpret(const struct lu_env *env,
				 struct ptlrpc_request *req,
				 struct osc_async_args *aa, int rc)
	struct ost_body *body;

	body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
		CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode);
		lustre_get_wire_obdo(&req->rq_import->imp_connect_data,
				     aa->aa_oi->oi_oa, &body->oa);

		/* This should really be sent by the OST */
		aa->aa_oi->oi_oa->o_blksize = DT_MAX_BRW_SIZE;
		aa->aa_oi->oi_oa->o_valid |= OBD_MD_FLBLKSZ;
		CDEBUG(D_INFO, "can't unpack ost_body\n");
		aa->aa_oi->oi_oa->o_valid = 0;
	rc = aa->aa_oi->oi_cb_up(aa->aa_oi, rc);

static int osc_getattr_async(struct obd_export *exp, struct obd_info *oinfo,
			     struct ptlrpc_request_set *set)
	struct ptlrpc_request *req;
	struct osc_async_args *aa;

	req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR);
	rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR);
		ptlrpc_request_free(req);
	osc_pack_req_body(req, oinfo);

	ptlrpc_request_set_replen(req);
	req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_getattr_interpret;

	CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
	aa = ptlrpc_req_async_args(req);
	ptlrpc_set_add_req(set, req);
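
/*
 * Note: rq_async_args is scratch space embedded in every ptlrpc_request,
 * and the CLASSERT() above is a compile-time check that the per-call
 * argument struct fits inside it, so no separate allocation is needed.
 * The same pattern recurs below for osc_setattr_args, osc_fsync_args,
 * and osc_brw_async_args.
 */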

static int osc_getattr(const struct lu_env *env, struct obd_export *exp,
		       struct obd_info *oinfo)
	struct ptlrpc_request *req;
	struct ost_body *body;

	req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR);
	rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR);
		ptlrpc_request_free(req);
	osc_pack_req_body(req, oinfo);

	ptlrpc_request_set_replen(req);

	rc = ptlrpc_queue_wait(req);
	body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
	CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode);
	lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oinfo->oi_oa,

	oinfo->oi_oa->o_blksize = cli_brw_size(exp->exp_obd);
	oinfo->oi_oa->o_valid |= OBD_MD_FLBLKSZ;

	ptlrpc_req_finished(req);

static int osc_setattr(const struct lu_env *env, struct obd_export *exp,
		       struct obd_info *oinfo, struct obd_trans_info *oti)
	struct ptlrpc_request *req;
	struct ost_body *body;

	LASSERT(oinfo->oi_oa->o_valid & OBD_MD_FLGROUP);

	req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR);
	rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SETATTR);
		ptlrpc_request_free(req);
	osc_pack_req_body(req, oinfo);

	ptlrpc_request_set_replen(req);

	rc = ptlrpc_queue_wait(req);
	body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
	lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oinfo->oi_oa,

	ptlrpc_req_finished(req);

static int osc_setattr_interpret(const struct lu_env *env,
				 struct ptlrpc_request *req,
				 struct osc_setattr_args *sa, int rc)
	struct ost_body *body;

	body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
	lustre_get_wire_obdo(&req->rq_import->imp_connect_data, sa->sa_oa,

	rc = sa->sa_upcall(sa->sa_cookie, rc);

int osc_setattr_async_base(struct obd_export *exp, struct obd_info *oinfo,
			   struct obd_trans_info *oti,
			   obd_enqueue_update_f upcall, void *cookie,
			   struct ptlrpc_request_set *rqset)
	struct ptlrpc_request *req;
	struct osc_setattr_args *sa;

	req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR);
	rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SETATTR);
		ptlrpc_request_free(req);
	if (oti && oinfo->oi_oa->o_valid & OBD_MD_FLCOOKIE)
		oinfo->oi_oa->o_lcookie = *oti->oti_logcookies;

	osc_pack_req_body(req, oinfo);

	ptlrpc_request_set_replen(req);

	/* Do MDS-to-OST setattr asynchronously. */
		/* Do not wait for response. */
		ptlrpcd_add_req(req);
		req->rq_interpret_reply =
			(ptlrpc_interpterer_t)osc_setattr_interpret;

		CLASSERT(sizeof(*sa) <= sizeof(req->rq_async_args));
		sa = ptlrpc_req_async_args(req);
		sa->sa_oa = oinfo->oi_oa;
		sa->sa_upcall = upcall;
		sa->sa_cookie = cookie;

		if (rqset == PTLRPCD_SET)
			ptlrpcd_add_req(req);
			ptlrpc_set_add_req(rqset, req);

static int osc_setattr_async(struct obd_export *exp, struct obd_info *oinfo,
			     struct obd_trans_info *oti,
			     struct ptlrpc_request_set *rqset)
	return osc_setattr_async_base(exp, oinfo, oti,
				      oinfo->oi_cb_up, oinfo, rqset);

int osc_real_create(struct obd_export *exp, struct obdo *oa,
		    struct lov_stripe_md **ea, struct obd_trans_info *oti)
	struct ptlrpc_request *req;
	struct ost_body *body;
	struct lov_stripe_md *lsm;

	rc = obd_alloc_memmd(exp, &lsm);

	req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_CREATE);
	rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_CREATE);
		ptlrpc_request_free(req);
	body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
	lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);

	ptlrpc_request_set_replen(req);

	if ((oa->o_valid & OBD_MD_FLFLAGS) &&
	    oa->o_flags == OBD_FL_DELORPHAN) {
		DEBUG_REQ(D_HA, req,
			  "delorphan from OST integration");
		/* Don't resend the delorphan req */
		req->rq_no_resend = req->rq_no_delay = 1;

	rc = ptlrpc_queue_wait(req);
	body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
	CDEBUG(D_INFO, "oa flags %x\n", oa->o_flags);
	lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oa, &body->oa);

	oa->o_blksize = cli_brw_size(exp->exp_obd);
	oa->o_valid |= OBD_MD_FLBLKSZ;

	/* XXX LOV STACKING: the lsm that is passed to us from LOV does not
	 * have valid lsm_oinfo data structs, so don't go touching that.
	 * This needs to be fixed in a big way.
	 */
	lsm->lsm_oi = oa->o_oi;

	oti->oti_transno = lustre_msg_get_transno(req->rq_repmsg);
	if (oa->o_valid & OBD_MD_FLCOOKIE) {
		if (!oti->oti_logcookies)
			oti_alloc_cookies(oti, 1);
		*oti->oti_logcookies = oa->o_lcookie;

	CDEBUG(D_HA, "transno: %lld\n",
	       lustre_msg_get_transno(req->rq_repmsg));
	ptlrpc_req_finished(req);
	obd_free_memmd(exp, &lsm);

int osc_punch_base(struct obd_export *exp, struct obd_info *oinfo,
		   obd_enqueue_update_f upcall, void *cookie,
		   struct ptlrpc_request_set *rqset)
	struct ptlrpc_request *req;
	struct osc_setattr_args *sa;
	struct ost_body *body;

	req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_PUNCH);
	rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_PUNCH);
		ptlrpc_request_free(req);
	req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
	ptlrpc_at_set_req_timeout(req);

	body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
	lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa,

	ptlrpc_request_set_replen(req);

	req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_setattr_interpret;
	CLASSERT(sizeof(*sa) <= sizeof(req->rq_async_args));
	sa = ptlrpc_req_async_args(req);
	sa->sa_oa = oinfo->oi_oa;
	sa->sa_upcall = upcall;
	sa->sa_cookie = cookie;
	if (rqset == PTLRPCD_SET)
		ptlrpcd_add_req(req);
		ptlrpc_set_add_req(rqset, req);

static int osc_sync_interpret(const struct lu_env *env,
			      struct ptlrpc_request *req,
	struct osc_fsync_args *fa = arg;
	struct ost_body *body;

	body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
		CERROR("can't unpack ost_body\n");
	*fa->fa_oi->oi_oa = body->oa;
	rc = fa->fa_upcall(fa->fa_cookie, rc);

int osc_sync_base(struct obd_export *exp, struct obd_info *oinfo,
		  obd_enqueue_update_f upcall, void *cookie,
		  struct ptlrpc_request_set *rqset)
	struct ptlrpc_request *req;
	struct ost_body *body;
	struct osc_fsync_args *fa;

	req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SYNC);
	rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SYNC);
		ptlrpc_request_free(req);
	/* overload the size and blocks fields in the oa with start/end */
	body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
	lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa,

	ptlrpc_request_set_replen(req);
	req->rq_interpret_reply = osc_sync_interpret;

	CLASSERT(sizeof(*fa) <= sizeof(req->rq_async_args));
	fa = ptlrpc_req_async_args(req);
	fa->fa_upcall = upcall;
	fa->fa_cookie = cookie;

	if (rqset == PTLRPCD_SET)
		ptlrpcd_add_req(req);
		ptlrpc_set_add_req(rqset, req);

/* Find and cancel locally the locks matched by @mode in the resource found
 * by @objid. Found locks are added to the @cancels list. Returns the number
 * of locks added to @cancels. */
static int osc_resource_get_unused(struct obd_export *exp, struct obdo *oa,
				   struct list_head *cancels,
				   ldlm_mode_t mode, __u64 lock_flags)
	struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
	struct ldlm_res_id res_id;
	struct ldlm_resource *res;

	/* Return, i.e. cancel nothing, only if ELC is supported (flag in
	 * export) but disabled through procfs (flag in NS).
	 *
	 * This distinguishes it from the case where ELC is not supported at
	 * all, in which case we still want to cancel locks in advance and
	 * just cancel them locally, without sending any RPC. */
	if (exp_connect_cancelset(exp) && !ns_connect_cancelset(ns))
	ostid_build_res_name(&oa->o_oi, &res_id);
	res = ldlm_resource_get(ns, NULL, &res_id, 0, 0);

	LDLM_RESOURCE_ADDREF(res);
	count = ldlm_cancel_resource_local(res, cancels, NULL, mode,
					   lock_flags, 0, NULL);
	LDLM_RESOURCE_DELREF(res);
	ldlm_resource_putref(res);

static int osc_destroy_interpret(const struct lu_env *env,
				 struct ptlrpc_request *req, void *data,
	struct client_obd *cli = &req->rq_import->imp_obd->u.cli;

	atomic_dec(&cli->cl_destroy_in_flight);
	wake_up(&cli->cl_destroy_waitq);

static int osc_can_send_destroy(struct client_obd *cli)
	if (atomic_inc_return(&cli->cl_destroy_in_flight) <=
	    cli->cl_max_rpcs_in_flight) {
		/* The destroy request can be sent */
	if (atomic_dec_return(&cli->cl_destroy_in_flight) <
	    cli->cl_max_rpcs_in_flight) {
		/*
		 * The counter has been modified between the two atomic
		 * operations.
		 */
		wake_up(&cli->cl_destroy_waitq);
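
/*
 * Note the optimistic pattern above: atomic_inc() first, check against the
 * limit, and on failure atomic_dec() and possibly wake a waiter that raced
 * with the decrement. Callers that get a "no" sleep on cl_destroy_waitq;
 * see the l_wait_event_exclusive() call in osc_destroy() below.
 */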

int osc_create(const struct lu_env *env, struct obd_export *exp,
	       struct obdo *oa, struct lov_stripe_md **ea,
	       struct obd_trans_info *oti)
	LASSERT(oa->o_valid & OBD_MD_FLGROUP);

	if ((oa->o_valid & OBD_MD_FLFLAGS) &&
	    oa->o_flags == OBD_FL_RECREATE_OBJS) {
		return osc_real_create(exp, oa, ea, oti);

	if (!fid_seq_is_mdt(ostid_seq(&oa->o_oi)))
		return osc_real_create(exp, oa, ea, oti);

	/* we should not get here anymore */

/* Destroy requests can always be async on the client, and we don't even
 * really care about the return code, since the client cannot do anything
 * at all about a failed destroy.
 * When the MDS is unlinking a filename, it saves the file objects into a
 * recovery llog, and these object records are cancelled when the OST reports
 * they were destroyed and synced to disk (i.e. transaction committed).
 * If the client dies, or the OST is down when the object should be destroyed,
 * the records are not cancelled, and when the OST next reconnects to the MDS,
 * it will retrieve the llog unlink logs and then send the log cancellation
 * cookies to the MDS after committing destroy transactions. */
static int osc_destroy(const struct lu_env *env, struct obd_export *exp,
		       struct obdo *oa, struct lov_stripe_md *ea,
		       struct obd_trans_info *oti, struct obd_export *md_export)
	struct client_obd *cli = &exp->exp_obd->u.cli;
	struct ptlrpc_request *req;
	struct ost_body *body;

		CDEBUG(D_INFO, "oa NULL\n");
	count = osc_resource_get_unused(exp, oa, &cancels, LCK_PW,
					LDLM_FL_DISCARD_DATA);

	req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_DESTROY);
		ldlm_lock_list_put(&cancels, l_bl_ast, count);
	rc = ldlm_prep_elc_req(exp, req, LUSTRE_OST_VERSION, OST_DESTROY,
		ptlrpc_request_free(req);
	req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
	ptlrpc_at_set_req_timeout(req);

	if (oti != NULL && oa->o_valid & OBD_MD_FLCOOKIE)
		oa->o_lcookie = *oti->oti_logcookies;
	body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
	lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);

	ptlrpc_request_set_replen(req);

	/* If osc_destroy is for destroying an unlink orphan (sent from MDT
	 * to OST), it should not block here: the request may have been
	 * triggered by ptlrpcd, and it is not good to block a ptlrpcd
	 * thread (b=16006). */
	if (!(oa->o_flags & OBD_FL_DELORPHAN)) {
		req->rq_interpret_reply = osc_destroy_interpret;
		if (!osc_can_send_destroy(cli)) {
			struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP,

			/*
			 * Wait until the number of on-going destroy RPCs
			 * drops below max_rpcs_in_flight.
			 */
			l_wait_event_exclusive(cli->cl_destroy_waitq,
					       osc_can_send_destroy(cli), &lwi);

	/* Do not wait for response */
	ptlrpcd_add_req(req);

static void osc_announce_cached(struct client_obd *cli, struct obdo *oa,
	u32 bits = OBD_MD_FLBLOCKS | OBD_MD_FLGRANT;

	LASSERT(!(oa->o_valid & bits));

	client_obd_list_lock(&cli->cl_loi_list_lock);
	oa->o_dirty = cli->cl_dirty;
	if (unlikely(cli->cl_dirty - cli->cl_dirty_transit >
		     cli->cl_dirty_max)) {
		CERROR("dirty %lu - %lu > dirty_max %lu\n",
		       cli->cl_dirty, cli->cl_dirty_transit, cli->cl_dirty_max);
	} else if (unlikely(atomic_read(&obd_dirty_pages) -
			    atomic_read(&obd_dirty_transit_pages) >
			    (long)(obd_max_dirty_pages + 1))) {
		/* The atomic_read() and atomic_inc() calls are not covered
		 * by a lock, so they may race and trip this CERROR() unless
		 * we add in a small fudge factor (+1). */
		CERROR("dirty %d - %d > system dirty_max %d\n",
		       atomic_read(&obd_dirty_pages),
		       atomic_read(&obd_dirty_transit_pages),
		       obd_max_dirty_pages);
	} else if (unlikely(cli->cl_dirty_max - cli->cl_dirty > 0x7fffffff)) {
		CERROR("dirty %lu - dirty_max %lu too big???\n",
		       cli->cl_dirty, cli->cl_dirty_max);
		long max_in_flight = (cli->cl_max_pages_per_rpc <<
				     (cli->cl_max_rpcs_in_flight + 1);
		oa->o_undirty = max(cli->cl_dirty_max, max_in_flight);
	oa->o_grant = cli->cl_avail_grant + cli->cl_reserved_grant;
	oa->o_dropped = cli->cl_lost_grant;
	cli->cl_lost_grant = 0;
	client_obd_list_unlock(&cli->cl_loi_list_lock);
	CDEBUG(D_CACHE, "dirty: %llu undirty: %u dropped %u grant: %llu\n",
	       oa->o_dirty, oa->o_undirty, oa->o_dropped, oa->o_grant);
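
/*
 * The fields packed above let the server track client cache state: o_dirty
 * is the amount of dirty data held, o_undirty the additional dirty headroom
 * being requested, o_grant the grant currently available (plus reserved),
 * and o_dropped the grant lost since the last report.
 */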

void osc_update_next_shrink(struct client_obd *cli)
	cli->cl_next_shrink_grant =
		cfs_time_shift(cli->cl_grant_shrink_interval);
	CDEBUG(D_CACHE, "next time %ld to shrink grant\n",
	       cli->cl_next_shrink_grant);

static void __osc_update_grant(struct client_obd *cli, u64 grant)
	client_obd_list_lock(&cli->cl_loi_list_lock);
	cli->cl_avail_grant += grant;
	client_obd_list_unlock(&cli->cl_loi_list_lock);

static void osc_update_grant(struct client_obd *cli, struct ost_body *body)
	if (body->oa.o_valid & OBD_MD_FLGRANT) {
		CDEBUG(D_CACHE, "got %llu extra grant\n", body->oa.o_grant);
		__osc_update_grant(cli, body->oa.o_grant);

static int osc_set_info_async(const struct lu_env *env, struct obd_export *exp,
			      u32 keylen, void *key, u32 vallen,
			      void *val, struct ptlrpc_request_set *set);

static int osc_shrink_grant_interpret(const struct lu_env *env,
				      struct ptlrpc_request *req,
	struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
	struct obdo *oa = ((struct osc_brw_async_args *)aa)->aa_oa;
	struct ost_body *body;

		__osc_update_grant(cli, oa->o_grant);
	body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
	osc_update_grant(cli, body);
	kmem_cache_free(obdo_cachep, oa);

static void osc_shrink_grant_local(struct client_obd *cli, struct obdo *oa)
	client_obd_list_lock(&cli->cl_loi_list_lock);
	oa->o_grant = cli->cl_avail_grant / 4;
	cli->cl_avail_grant -= oa->o_grant;
	client_obd_list_unlock(&cli->cl_loi_list_lock);
	if (!(oa->o_valid & OBD_MD_FLFLAGS)) {
		oa->o_valid |= OBD_MD_FLFLAGS;
	oa->o_flags |= OBD_FL_SHRINK_GRANT;
	osc_update_next_shrink(cli);

/* Shrink the current grant, either from some large amount to enough for a
 * full set of in-flight RPCs, or if we have already shrunk to that limit
 * then to enough for a single RPC. This avoids keeping more grant than
 * needed, and avoids shrinking the grant piecemeal. */
static int osc_shrink_grant(struct client_obd *cli)
	__u64 target_bytes = (cli->cl_max_rpcs_in_flight + 1) *
			     (cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT);

	client_obd_list_lock(&cli->cl_loi_list_lock);
	if (cli->cl_avail_grant <= target_bytes)
		target_bytes = cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT;
	client_obd_list_unlock(&cli->cl_loi_list_lock);

	return osc_shrink_grant_to_target(cli, target_bytes);
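
/*
 * Worked example with illustrative numbers: at 256 pages per RPC, 4 KB
 * pages, and 8 RPCs in flight, each RPC covers 1 MB, so the first shrink
 * targets (8 + 1) * 1 MB = 9 MB; once the available grant is at or below
 * that, the next shrink targets a single RPC's worth, 1 MB.
 */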

int osc_shrink_grant_to_target(struct client_obd *cli, __u64 target_bytes)
	struct ost_body *body;

	client_obd_list_lock(&cli->cl_loi_list_lock);
	/* Don't shrink if we are already above or below the desired limit.
	 * We don't want to shrink below a single RPC, as that will negatively
	 * impact block allocation and long-term performance. */
	if (target_bytes < cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT)
		target_bytes = cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT;

	if (target_bytes >= cli->cl_avail_grant) {
		client_obd_list_unlock(&cli->cl_loi_list_lock);
	client_obd_list_unlock(&cli->cl_loi_list_lock);

	body = kzalloc(sizeof(*body), GFP_NOFS);
	osc_announce_cached(cli, &body->oa, 0);

	client_obd_list_lock(&cli->cl_loi_list_lock);
	body->oa.o_grant = cli->cl_avail_grant - target_bytes;
	cli->cl_avail_grant = target_bytes;
	client_obd_list_unlock(&cli->cl_loi_list_lock);
	if (!(body->oa.o_valid & OBD_MD_FLFLAGS)) {
		body->oa.o_valid |= OBD_MD_FLFLAGS;
		body->oa.o_flags = 0;
	body->oa.o_flags |= OBD_FL_SHRINK_GRANT;
	osc_update_next_shrink(cli);

	rc = osc_set_info_async(NULL, cli->cl_import->imp_obd->obd_self_export,
				sizeof(KEY_GRANT_SHRINK), KEY_GRANT_SHRINK,
				sizeof(*body), body, NULL);
		__osc_update_grant(cli, body->oa.o_grant);

static int osc_should_shrink_grant(struct client_obd *client)
	unsigned long time = cfs_time_current();
	unsigned long next_shrink = client->cl_next_shrink_grant;

	if ((client->cl_import->imp_connect_data.ocd_connect_flags &
	     OBD_CONNECT_GRANT_SHRINK) == 0)
	if (cfs_time_aftereq(time, next_shrink - 5 * CFS_TICK)) {
		/* Get the current RPC size directly, instead of going via:
		 * cli_brw_size(obd->u.cli.cl_import->imp_obd->obd_self_export)
		 * Keep comment here so that it can be found by searching. */
		int brw_size = client->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT;

		if (client->cl_import->imp_state == LUSTRE_IMP_FULL &&
		    client->cl_avail_grant > brw_size)

		osc_update_next_shrink(client);

static int osc_grant_shrink_grant_cb(struct timeout_item *item, void *data)
	struct client_obd *client;

	list_for_each_entry(client, &item->ti_obd_list,
			    cl_grant_shrink_list) {
		if (osc_should_shrink_grant(client))
			osc_shrink_grant(client);

static int osc_add_shrink_grant(struct client_obd *client)
	rc = ptlrpc_add_timeout_client(client->cl_grant_shrink_interval,
				       osc_grant_shrink_grant_cb, NULL,
				       &client->cl_grant_shrink_list);
		CERROR("add grant client %s error %d\n",
		       client->cl_import->imp_obd->obd_name, rc);
	CDEBUG(D_CACHE, "add grant client %s\n",
	       client->cl_import->imp_obd->obd_name);
	osc_update_next_shrink(client);

static int osc_del_shrink_grant(struct client_obd *client)
	return ptlrpc_del_timeout_client(&client->cl_grant_shrink_list,

static void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd)
	/*
	 * ocd_grant is the total grant amount we expect to hold: if we have
	 * been evicted, it's the new avail_grant amount, and cl_dirty will
	 * drop to 0 as in-flight RPCs fail out; otherwise, it's avail_grant
	 * plus dirty.
	 *
	 * The race is tolerable here: if we're evicted, but imp_state has
	 * already left EVICTED state, then cl_dirty must be 0 already.
	 */
	client_obd_list_lock(&cli->cl_loi_list_lock);
	if (cli->cl_import->imp_state == LUSTRE_IMP_EVICTED)
		cli->cl_avail_grant = ocd->ocd_grant;
		cli->cl_avail_grant = ocd->ocd_grant - cli->cl_dirty;

	if (cli->cl_avail_grant < 0) {
		CWARN("%s: available grant < 0: avail/ocd/dirty %ld/%u/%ld\n",
		      cli->cl_import->imp_obd->obd_name, cli->cl_avail_grant,
		      ocd->ocd_grant, cli->cl_dirty);
		/* workaround for servers which do not have the patch from
		cli->cl_avail_grant = ocd->ocd_grant;

	/* determine the appropriate chunk size used by osc_extent. */
	cli->cl_chunkbits = max_t(int, PAGE_CACHE_SHIFT, ocd->ocd_blocksize);
	client_obd_list_unlock(&cli->cl_loi_list_lock);

	CDEBUG(D_CACHE, "%s, setting cl_avail_grant: %ld cl_lost_grant: %ld chunk bits: %d\n",
	       cli->cl_import->imp_obd->obd_name,
	       cli->cl_avail_grant, cli->cl_lost_grant, cli->cl_chunkbits);

	if (ocd->ocd_connect_flags & OBD_CONNECT_GRANT_SHRINK &&
	    list_empty(&cli->cl_grant_shrink_list))
		osc_add_shrink_grant(cli);

/* We assume that the reason this OSC got a short read is because it read
 * beyond the end of a stripe file; i.e. Lustre is reading a sparse file
 * via the LOV, and it _knows_ it's reading inside the file, it's just that
 * this stripe never got written at or beyond this stripe offset yet. */
static void handle_short_read(int nob_read, u32 page_count,
			      struct brw_page **pga)
	/* skip bytes read OK */
	while (nob_read > 0) {
		LASSERT(page_count > 0);

		if (pga[i]->count > nob_read) {
			/* EOF inside this page */
			ptr = kmap(pga[i]->pg) +
			      (pga[i]->off & ~CFS_PAGE_MASK);
			memset(ptr + nob_read, 0, pga[i]->count - nob_read);
		nob_read -= pga[i]->count;

	/* zero remaining pages */
	while (page_count-- > 0) {
		ptr = kmap(pga[i]->pg) + (pga[i]->off & ~CFS_PAGE_MASK);
		memset(ptr, 0, pga[i]->count);

static int check_write_rcs(struct ptlrpc_request *req,
			   int requested_nob, int niocount,
			   u32 page_count, struct brw_page **pga)
	remote_rcs = req_capsule_server_sized_get(&req->rq_pill, &RMF_RCS,
						  sizeof(*remote_rcs) *
	if (remote_rcs == NULL) {
		CDEBUG(D_INFO, "Missing/short RC vector on BRW_WRITE reply\n");

	/* return error if any niobuf was in error */
	for (i = 0; i < niocount; i++) {
		if ((int)remote_rcs[i] < 0)
			return remote_rcs[i];

		if (remote_rcs[i] != 0) {
			CDEBUG(D_INFO, "rc[%d] invalid (%d) req %p\n",
			       i, remote_rcs[i], req);

	if (req->rq_bulk->bd_nob_transferred != requested_nob) {
		CERROR("Unexpected # bytes transferred: %d (requested %d)\n",
		       req->rq_bulk->bd_nob_transferred, requested_nob);

static inline int can_merge_pages(struct brw_page *p1, struct brw_page *p2)
	if (p1->flag != p2->flag) {
		unsigned mask = ~(OBD_BRW_FROM_GRANT | OBD_BRW_NOCACHE |
				  OBD_BRW_SYNC | OBD_BRW_ASYNC |
				  OBD_BRW_NOQUOTA);

		/* warn if we try to combine flags that we don't know to be
		 * safe to combine */
		if (unlikely((p1->flag & mask) != (p2->flag & mask))) {
			CWARN("Saw flags 0x%x and 0x%x in the same brw, please report this at http://bugs.whamcloud.com/\n",
			      p1->flag, p2->flag);
	return (p1->off + p1->count == p2->off);
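
/*
 * In short: two pages can share one niobuf only when they are contiguous
 * in file offset and their flags agree outside a small set of bits
 * (grant, cache, sync/async, quota) that are known to be safe to mix.
 */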

static u32 osc_checksum_bulk(int nob, u32 pg_count,
			     struct brw_page **pga, int opc,
			     cksum_type_t cksum_type)
	struct cfs_crypto_hash_desc *hdesc;
	unsigned int bufsize;
	unsigned char cfs_alg = cksum_obd2cfs(cksum_type);

	LASSERT(pg_count > 0);

	hdesc = cfs_crypto_hash_init(cfs_alg, NULL, 0);
	if (IS_ERR(hdesc)) {
		CERROR("Unable to initialize checksum hash %s\n",
		       cfs_crypto_hash_name(cfs_alg));
		return PTR_ERR(hdesc);

	while (nob > 0 && pg_count > 0) {
		int count = pga[i]->count > nob ? nob : pga[i]->count;

		/* corrupt the data before we compute the checksum, to
		 * simulate an OST->client data error */
		if (i == 0 && opc == OST_READ &&
		    OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE)) {
			unsigned char *ptr = kmap(pga[i]->pg);
			int off = pga[i]->off & ~CFS_PAGE_MASK;

			memcpy(ptr + off, "bad1", min(4, nob));
		cfs_crypto_hash_update_page(hdesc, pga[i]->pg,
					    pga[i]->off & ~CFS_PAGE_MASK,
		       "page %p map %p index %lu flags %lx count %u priv %0lx: off %d\n",
		       pga[i]->pg, pga[i]->pg->mapping, pga[i]->pg->index,
		       (long)pga[i]->pg->flags, page_count(pga[i]->pg),
		       page_private(pga[i]->pg),
		       (int)(pga[i]->off & ~CFS_PAGE_MASK));
		nob -= pga[i]->count;

	err = cfs_crypto_hash_final(hdesc, (unsigned char *)&cksum, &bufsize);
		cfs_crypto_hash_final(hdesc, NULL, NULL);

	/* For sending, we only compute a wrong checksum instead of
	 * corrupting the data, so the data is still correct if resent */
	if (opc == OST_WRITE && OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_SEND))

static int osc_brw_prep_request(int cmd, struct client_obd *cli,
				struct lov_stripe_md *lsm, u32 page_count,
				struct brw_page **pga,
				struct ptlrpc_request **reqp,
	struct ptlrpc_request *req;
	struct ptlrpc_bulk_desc *desc;
	struct ost_body *body;
	struct obd_ioobj *ioobj;
	struct niobuf_remote *niobuf;
	int niocount, i, requested_nob, opc, rc;
	struct osc_brw_async_args *aa;
	struct req_capsule *pill;
	struct brw_page *pg_prev;

	if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_PREP_REQ))
		return -ENOMEM; /* Recoverable */
	if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_PREP_REQ2))
		return -EINVAL; /* Fatal */

	if ((cmd & OBD_BRW_WRITE) != 0) {
		req = ptlrpc_request_alloc_pool(cli->cl_import,
						&RQF_OST_BRW_WRITE);
		req = ptlrpc_request_alloc(cli->cl_import, &RQF_OST_BRW_READ);
	for (niocount = i = 1; i < page_count; i++) {
		if (!can_merge_pages(pga[i - 1], pga[i]))

	pill = &req->rq_pill;
	req_capsule_set_size(pill, &RMF_OBD_IOOBJ, RCL_CLIENT,
	req_capsule_set_size(pill, &RMF_NIOBUF_REMOTE, RCL_CLIENT,
			     niocount * sizeof(*niobuf));

	rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, opc);
		ptlrpc_request_free(req);
	req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
	ptlrpc_at_set_req_timeout(req);
	/* ask ptlrpc not to resend on EINPROGRESS since BRWs have their own
	req->rq_no_retry_einprogress = 1;

	desc = ptlrpc_prep_bulk_imp(req, page_count,
		cli->cl_import->imp_connect_data.ocd_brw_size >> LNET_MTU_BITS,
		opc == OST_WRITE ? BULK_GET_SOURCE : BULK_PUT_SINK,

	/* NB request now owns desc and will free it when it gets freed */
	body = req_capsule_client_get(pill, &RMF_OST_BODY);
	ioobj = req_capsule_client_get(pill, &RMF_OBD_IOOBJ);
	niobuf = req_capsule_client_get(pill, &RMF_NIOBUF_REMOTE);
	LASSERT(body != NULL && ioobj != NULL && niobuf != NULL);

	lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);

	obdo_to_ioobj(oa, ioobj);
	ioobj->ioo_bufcnt = niocount;
	/* The high bits of ioo_max_brw tell the server the _maximum_ number
	 * of bulks that might be sent for this request. The actual number is
	 * decided when the RPC is finally sent in ptlrpc_register_bulk(). It
	 * sends "max - 1" for compatibility with old clients sending "0",
	 * and also so that the actual maximum is a power-of-two number, not
	 * one less. LU-1431 */
	ioobj_max_brw_set(ioobj, desc->bd_md_max_brw);
	LASSERT(page_count > 0);

	for (requested_nob = i = 0; i < page_count; i++, niobuf++) {
		struct brw_page *pg = pga[i];
		int poff = pg->off & ~CFS_PAGE_MASK;

		LASSERT(pg->count > 0);
		/* make sure there is no gap in the middle of page array */
		LASSERTF(page_count == 1 ||
			 (ergo(i == 0, poff + pg->count == PAGE_CACHE_SIZE) &&
			  ergo(i > 0 && i < page_count - 1,
			       poff == 0 && pg->count == PAGE_CACHE_SIZE) &&
			  ergo(i == page_count - 1, poff == 0)),
			 "i: %d/%d pg: %p off: %llu, count: %u\n",
			 i, page_count, pg, pg->off, pg->count);
		LASSERTF(i == 0 || pg->off > pg_prev->off,
			 "i %d p_c %u pg %p [pri %lu ind %lu] off %llu prev_pg %p [pri %lu ind %lu] off %llu\n",
			 pg->pg, page_private(pg->pg), pg->pg->index, pg->off,
			 pg_prev->pg, page_private(pg_prev->pg),
			 pg_prev->pg->index, pg_prev->off);
		LASSERT((pga[0]->flag & OBD_BRW_SRVLOCK) ==
			(pg->flag & OBD_BRW_SRVLOCK));

		ptlrpc_prep_bulk_page_pin(desc, pg->pg, poff, pg->count);
		requested_nob += pg->count;

		if (i > 0 && can_merge_pages(pg_prev, pg)) {
			niobuf->len += pg->count;
			niobuf->offset = pg->off;
			niobuf->len = pg->count;
			niobuf->flags = pg->flag;

	LASSERTF((void *)(niobuf - niocount) ==
		 req_capsule_client_get(&req->rq_pill, &RMF_NIOBUF_REMOTE),
		 "want %p - real %p\n", req_capsule_client_get(&req->rq_pill,
		 &RMF_NIOBUF_REMOTE), (void *)(niobuf - niocount));

	osc_announce_cached(cli, &body->oa, opc == OST_WRITE ? requested_nob : 0);

	if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0) {
		body->oa.o_valid |= OBD_MD_FLFLAGS;
		body->oa.o_flags = 0;
	body->oa.o_flags |= OBD_FL_RECOV_RESEND;

	if (osc_should_shrink_grant(cli))
		osc_shrink_grant_local(cli, &body->oa);

	/* size[REQ_REC_OFF] still sizeof (*body) */
	if (opc == OST_WRITE) {
		if (cli->cl_checksum &&
		    !sptlrpc_flavor_has_bulk(&req->rq_flvr)) {
			/* store cl_cksum_type in a local variable since
			 * it can be changed via lprocfs */
			cksum_type_t cksum_type = cli->cl_cksum_type;

			if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0) {
				oa->o_flags &= OBD_FL_LOCAL_MASK;
				body->oa.o_flags = 0;
			body->oa.o_flags |= cksum_type_pack(cksum_type);
			body->oa.o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
			body->oa.o_cksum = osc_checksum_bulk(requested_nob,

			CDEBUG(D_PAGE, "checksum at write origin: %x\n",

			/* save this in 'oa', too, for later checking */
			oa->o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
			oa->o_flags |= cksum_type_pack(cksum_type);
			/* clear out the checksum flag, in case this is a
			 * resend but cl_checksum is no longer set. b=11238 */
			oa->o_valid &= ~OBD_MD_FLCKSUM;
		oa->o_cksum = body->oa.o_cksum;
		/* 1 RC per niobuf */
		req_capsule_set_size(pill, &RMF_RCS, RCL_SERVER,
				     sizeof(__u32) * niocount);
		if (cli->cl_checksum &&
		    !sptlrpc_flavor_has_bulk(&req->rq_flvr)) {
			if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0)
				body->oa.o_flags = 0;
			body->oa.o_flags |= cksum_type_pack(cli->cl_cksum_type);
			body->oa.o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
	ptlrpc_request_set_replen(req);

	CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
	aa = ptlrpc_req_async_args(req);
	aa->aa_requested_nob = requested_nob;
	aa->aa_nio_count = niocount;
	aa->aa_page_count = page_count;
	INIT_LIST_HEAD(&aa->aa_oaps);

	ptlrpc_req_finished(req);

static int check_write_checksum(struct obdo *oa, const lnet_process_id_t *peer,
				__u32 client_cksum, __u32 server_cksum, int nob,
				u32 page_count, struct brw_page **pga,
				cksum_type_t client_cksum_type)
	cksum_type_t cksum_type;

	if (server_cksum == client_cksum) {
		CDEBUG(D_PAGE, "checksum %x confirmed\n", client_cksum);
	cksum_type = cksum_type_unpack(oa->o_valid & OBD_MD_FLFLAGS ?
	new_cksum = osc_checksum_bulk(nob, page_count, pga, OST_WRITE,

	if (cksum_type != client_cksum_type)
		msg = "the server did not use the checksum type specified in the original request - likely a protocol problem";
	else if (new_cksum == server_cksum)
		msg = "changed on the client after we checksummed it - likely false positive due to mmap IO (bug 11742)";
	else if (new_cksum == client_cksum)
		msg = "changed in transit before arrival at OST";
	else
		msg = "changed in transit AND doesn't match the original - likely false positive due to mmap IO (bug 11742)";

	LCONSOLE_ERROR_MSG(0x132, "BAD WRITE CHECKSUM: %s: from %s inode "DFID
			   " object "DOSTID" extent [%llu-%llu]\n",
			   msg, libcfs_nid2str(peer->nid),
			   oa->o_valid & OBD_MD_FLFID ? oa->o_parent_seq : (__u64)0,
			   oa->o_valid & OBD_MD_FLFID ? oa->o_parent_oid : 0,
			   oa->o_valid & OBD_MD_FLFID ? oa->o_parent_ver : 0,
			   POSTID(&oa->o_oi), pga[0]->off,
			   pga[page_count-1]->off + pga[page_count-1]->count - 1);
	CERROR("original client csum %x (type %x), server csum %x (type %x), client csum now %x\n",
	       client_cksum, client_cksum_type,
	       server_cksum, cksum_type, new_cksum);
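
/*
 * Summary of the diagnosis above: if re-checksumming the pages now gives
 * the server's value, the data changed on the client after it was first
 * checksummed (typically mmap IO); if it still gives the original client
 * value, the data changed in transit before reaching the OST; any other
 * value means it changed in transit and no longer matches the original.
 */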

/* Note rc enters this function as the number of bytes transferred */
static int osc_brw_fini_request(struct ptlrpc_request *req, int rc)
	struct osc_brw_async_args *aa = (void *)&req->rq_async_args;
	const lnet_process_id_t *peer =
			&req->rq_import->imp_connection->c_peer;
	struct client_obd *cli = aa->aa_cli;
	struct ost_body *body;
	__u32 client_cksum = 0;

	if (rc < 0 && rc != -EDQUOT) {
		DEBUG_REQ(D_INFO, req, "Failed request with rc = %d\n", rc);

	LASSERTF(req->rq_repmsg != NULL, "rc = %d\n", rc);
	body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
		DEBUG_REQ(D_INFO, req, "Can't unpack body\n");

	/* set/clear over quota flag for a uid/gid */
	if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE &&
	    body->oa.o_valid & (OBD_MD_FLUSRQUOTA | OBD_MD_FLGRPQUOTA)) {
		unsigned int qid[MAXQUOTAS] = { body->oa.o_uid, body->oa.o_gid };

		CDEBUG(D_QUOTA, "setdq for [%u %u] with valid %#llx, flags %x\n",
		       body->oa.o_uid, body->oa.o_gid, body->oa.o_valid,
		osc_quota_setdq(cli, qid, body->oa.o_valid, body->oa.o_flags);

	osc_update_grant(cli, body);

	if (aa->aa_oa->o_valid & OBD_MD_FLCKSUM)
		client_cksum = aa->aa_oa->o_cksum; /* save for later */

	if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE) {
			CERROR("Unexpected +ve rc %d\n", rc);
		LASSERT(req->rq_bulk->bd_nob == aa->aa_requested_nob);

		if (sptlrpc_cli_unwrap_bulk_write(req, req->rq_bulk))

		if ((aa->aa_oa->o_valid & OBD_MD_FLCKSUM) && client_cksum &&
		    check_write_checksum(&body->oa, peer, client_cksum,
					 body->oa.o_cksum, aa->aa_requested_nob,
					 aa->aa_page_count, aa->aa_ppga,
					 cksum_type_unpack(aa->aa_oa->o_flags)))

		rc = check_write_rcs(req, aa->aa_requested_nob,
				     aa->aa_page_count, aa->aa_ppga);

	/* The rest of this function executes only for OST_READs */

	/* if unwrap_bulk failed, return -EAGAIN to retry */
	rc = sptlrpc_cli_unwrap_bulk_read(req, req->rq_bulk, rc);

	if (rc > aa->aa_requested_nob) {
		CERROR("Unexpected rc %d (%d requested)\n", rc,
		       aa->aa_requested_nob);

	if (rc != req->rq_bulk->bd_nob_transferred) {
		CERROR("Unexpected rc %d (%d transferred)\n",
		       rc, req->rq_bulk->bd_nob_transferred);

	if (rc < aa->aa_requested_nob)
		handle_short_read(rc, aa->aa_page_count, aa->aa_ppga);

	if (body->oa.o_valid & OBD_MD_FLCKSUM) {
		static int cksum_counter;
		__u32 server_cksum = body->oa.o_cksum;
		cksum_type_t cksum_type;

		cksum_type = cksum_type_unpack(body->oa.o_valid & OBD_MD_FLFLAGS ?
					       body->oa.o_flags : 0);
		client_cksum = osc_checksum_bulk(rc, aa->aa_page_count,
						 aa->aa_ppga, OST_READ,

		if (peer->nid != req->rq_bulk->bd_sender) {
			router = libcfs_nid2str(req->rq_bulk->bd_sender);

		if (server_cksum != client_cksum) {
			LCONSOLE_ERROR_MSG(0x133, "%s: BAD READ CHECKSUM: from %s%s%s inode " DFID " object " DOSTID " extent [%llu-%llu]\n",
					   req->rq_import->imp_obd->obd_name,
					   libcfs_nid2str(peer->nid),
					   body->oa.o_valid & OBD_MD_FLFID ?
					   body->oa.o_parent_seq : (__u64)0,
					   body->oa.o_valid & OBD_MD_FLFID ?
					   body->oa.o_parent_oid : 0,
					   body->oa.o_valid & OBD_MD_FLFID ?
					   body->oa.o_parent_ver : 0,
					   POSTID(&body->oa.o_oi),
					   aa->aa_ppga[0]->off,
					   aa->aa_ppga[aa->aa_page_count-1]->off +
					   aa->aa_ppga[aa->aa_page_count-1]->count -
			CERROR("client %x, server %x, cksum_type %x\n",
			       client_cksum, server_cksum, cksum_type);
			aa->aa_oa->o_cksum = client_cksum;
			CDEBUG(D_PAGE, "checksum %x confirmed\n", client_cksum);
	} else if (unlikely(client_cksum)) {
		static int cksum_missed;

		if ((cksum_missed & (-cksum_missed)) == cksum_missed)
			CERROR("Checksum %u requested from %s but not sent\n",
			       cksum_missed, libcfs_nid2str(peer->nid));

	lustre_get_wire_obdo(&req->rq_import->imp_connect_data,
			     aa->aa_oa, &body->oa);

static int osc_brw_redo_request(struct ptlrpc_request *request,
				struct osc_brw_async_args *aa, int rc)
	struct ptlrpc_request *new_req;
	struct osc_brw_async_args *new_aa;
	struct osc_async_page *oap;

	DEBUG_REQ(rc == -EINPROGRESS ? D_RPCTRACE : D_ERROR, request,
		  "redo for recoverable error %d", rc);

	rc = osc_brw_prep_request(lustre_msg_get_opc(request->rq_reqmsg) ==
				  OST_WRITE ? OBD_BRW_WRITE : OBD_BRW_READ,
				  aa->aa_cli, aa->aa_oa,
				  NULL /* lsm unused by osc currently */,
				  aa->aa_page_count, aa->aa_ppga,

	list_for_each_entry(oap, &aa->aa_oaps, oap_rpc_item) {
		if (oap->oap_request != NULL) {
			LASSERTF(request == oap->oap_request,
				 "request %p != oap_request %p\n",
				 request, oap->oap_request);
			if (oap->oap_interrupted) {
				ptlrpc_req_finished(new_req);

	/* The new request takes over pga and oaps from the old request.
	 * Note that copying a list_head doesn't work; it has to be moved. */

	new_req->rq_interpret_reply = request->rq_interpret_reply;
	new_req->rq_async_args = request->rq_async_args;
	/* cap resend delay to the current request timeout, this is similar to
	 * what ptlrpc does (see after_reply()) */
	if (aa->aa_resends > new_req->rq_timeout)
		new_req->rq_sent = ktime_get_real_seconds() + new_req->rq_timeout;
		new_req->rq_sent = ktime_get_real_seconds() + aa->aa_resends;
	new_req->rq_generation_set = 1;
	new_req->rq_import_generation = request->rq_import_generation;

	new_aa = ptlrpc_req_async_args(new_req);

	INIT_LIST_HEAD(&new_aa->aa_oaps);
	list_splice_init(&aa->aa_oaps, &new_aa->aa_oaps);
	INIT_LIST_HEAD(&new_aa->aa_exts);
	list_splice_init(&aa->aa_exts, &new_aa->aa_exts);
	new_aa->aa_resends = aa->aa_resends;

	list_for_each_entry(oap, &new_aa->aa_oaps, oap_rpc_item) {
		if (oap->oap_request) {
			ptlrpc_req_finished(oap->oap_request);
			oap->oap_request = ptlrpc_request_addref(new_req);

	/* XXX: This code will run into problems if we ever support adding a
	 * series of BRW RPCs into a self-defined ptlrpc_request_set and
	 * waiting for all of them to finish. We should inherit the request
	 * set from the old request. */
	ptlrpcd_add_req(new_req);

	DEBUG_REQ(D_INFO, new_req, "new request");

/*
 * Ugh, we want disk allocation on the target to happen in offset order. We'll
 * follow Sedgewick's advice and stick to the dead simple shellsort -- it'll
 * do fine for our small page arrays and doesn't require allocation. It's an
 * insertion sort that swaps elements that are strides apart, shrinking the
 * stride down until it's 1 and the array is sorted.
 */
static void sort_brw_pages(struct brw_page **array, int num)
	struct brw_page *tmp;

	for (stride = 1; stride < num ; stride = (stride * 3) + 1)
		for (i = stride ; i < num ; i++) {
			while (j >= stride && array[j - stride]->off > tmp->off) {
				array[j] = array[j - stride];
	} while (stride > 1);
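
/*
 * The stride sequence is Knuth's 1, 4, 13, 40, 121, ... (h -> 3h + 1). For
 * example, with num = 100 the first loop stops at 121, so the passes run
 * with strides 40, 13, 4, and finally 1, where the last pass is a plain
 * insertion sort over an already nearly sorted array.
 */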

static void osc_release_ppga(struct brw_page **ppga, u32 count)
	LASSERT(ppga != NULL);

static int brw_interpret(const struct lu_env *env,
			 struct ptlrpc_request *req, void *data, int rc)
	struct osc_brw_async_args *aa = data;
	struct osc_extent *ext;
	struct osc_extent *tmp;
	struct cl_object *obj = NULL;
	struct client_obd *cli = aa->aa_cli;

	rc = osc_brw_fini_request(req, rc);
	CDEBUG(D_INODE, "request %p aa %p rc %d\n", req, aa, rc);
	/* When the server returns -EINPROGRESS, the client should always
	 * retry, regardless of how many times the bulk was already resent. */
	if (osc_recoverable_error(rc)) {
		if (req->rq_import_generation !=
		    req->rq_import->imp_generation) {
			CDEBUG(D_HA, "%s: resend cross eviction for object: " DOSTID ", rc = %d.\n",
			       req->rq_import->imp_obd->obd_name,
			       POSTID(&aa->aa_oa->o_oi), rc);
		} else if (rc == -EINPROGRESS ||
			   client_should_resend(aa->aa_resends, aa->aa_cli)) {
			rc = osc_brw_redo_request(req, aa, rc);
			CERROR("%s: too many resent retries for object: %llu:%llu, rc = %d.\n",
			       req->rq_import->imp_obd->obd_name,
			       POSTID(&aa->aa_oa->o_oi), rc);
	else if (rc == -EAGAIN || rc == -EINPROGRESS)

	list_for_each_entry_safe(ext, tmp, &aa->aa_exts, oe_link) {
		if (obj == NULL && rc == 0) {
			obj = osc2cl(ext->oe_obj);
		list_del_init(&ext->oe_link);
		osc_extent_finish(env, ext, 1, rc);
	LASSERT(list_empty(&aa->aa_exts));
	LASSERT(list_empty(&aa->aa_oaps));

		struct obdo *oa = aa->aa_oa;
		struct cl_attr *attr = &osc_env_info(env)->oti_attr;
		unsigned long valid = 0;

		if (oa->o_valid & OBD_MD_FLBLOCKS) {
			attr->cat_blocks = oa->o_blocks;
			valid |= CAT_BLOCKS;
		if (oa->o_valid & OBD_MD_FLMTIME) {
			attr->cat_mtime = oa->o_mtime;
		if (oa->o_valid & OBD_MD_FLATIME) {
			attr->cat_atime = oa->o_atime;
		if (oa->o_valid & OBD_MD_FLCTIME) {
			attr->cat_ctime = oa->o_ctime;

		cl_object_attr_lock(obj);
		cl_object_attr_set(env, obj, attr, valid);
		cl_object_attr_unlock(obj);
		cl_object_put(env, obj);
	kmem_cache_free(obdo_cachep, aa->aa_oa);
	cl_req_completion(env, aa->aa_clerq, rc < 0 ? rc :
			  req->rq_bulk->bd_nob_transferred);
	osc_release_ppga(aa->aa_ppga, aa->aa_page_count);
	ptlrpc_lprocfs_brw(req, req->rq_bulk->bd_nob_transferred);

	client_obd_list_lock(&cli->cl_loi_list_lock);
	/* We need to decrement before osc_ap_completion->osc_wake_cache_waiters
	 * is called so we know whether to go to sync BRWs or wait for more
	 * RPCs to complete */
	if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE)
		cli->cl_w_in_flight--;
		cli->cl_r_in_flight--;
	osc_wake_cache_waiters(cli);
	client_obd_list_unlock(&cli->cl_loi_list_lock);

	osc_io_unplug(env, cli, NULL);

/**
 * Build an RPC from the list of extents @ext_list. The caller must ensure
 * that the total pages in this list are NOT over max pages per RPC.
 * Extents in the list must be in OES_RPC state.
 */
int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
		  struct list_head *ext_list, int cmd)
	struct ptlrpc_request *req = NULL;
	struct osc_extent *ext;
	struct brw_page **pga = NULL;
	struct osc_brw_async_args *aa = NULL;
	struct obdo *oa = NULL;
	struct osc_async_page *oap;
	struct osc_async_page *tmp;
	struct cl_req *clerq = NULL;
	enum cl_req_type crt = (cmd & OBD_BRW_WRITE) ? CRT_WRITE : CRT_READ;
	struct ldlm_lock *lock = NULL;
	struct cl_req_attr *crattr = NULL;
	u64 starting_offset = OBD_OBJECT_EOF;
	u64 ending_offset = 0;
	struct ost_body *body;
	LIST_HEAD(rpc_list);

	LASSERT(!list_empty(ext_list));

	/* add pages into rpc_list to build BRW rpc */
	list_for_each_entry(ext, ext_list, oe_link) {
		LASSERT(ext->oe_state == OES_RPC);
		mem_tight |= ext->oe_memalloc;
		list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
			list_add_tail(&oap->oap_rpc_item, &rpc_list);
			if (starting_offset > oap->oap_obj_off)
				starting_offset = oap->oap_obj_off;
				LASSERT(oap->oap_page_off == 0);
			if (ending_offset < oap->oap_obj_off + oap->oap_count)
				ending_offset = oap->oap_obj_off +
				LASSERT(oap->oap_page_off + oap->oap_count ==
	mpflag = cfs_memory_pressure_get_and_set();

	crattr = kzalloc(sizeof(*crattr), GFP_NOFS);
	pga = kcalloc(page_count, sizeof(*pga), GFP_NOFS);
	oa = kmem_cache_alloc(obdo_cachep, GFP_NOFS | __GFP_ZERO);

	list_for_each_entry(oap, &rpc_list, oap_rpc_item) {
		struct cl_page *page = oap2cl_page(oap);

		if (clerq == NULL) {
			clerq = cl_req_alloc(env, page, crt,
					     1 /* only 1-object rpcs for now */);
			if (IS_ERR(clerq)) {
				rc = PTR_ERR(clerq);
			lock = oap->oap_ldlm_lock;
			oap->oap_brw_flags |= OBD_BRW_MEMALLOC;
		pga[i] = &oap->oap_brw_page;
		pga[i]->off = oap->oap_obj_off + oap->oap_page_off;
		CDEBUG(0, "put page %p index %lu oap %p flg %x to pga\n",
		       pga[i]->pg, page_index(oap->oap_page), oap,
		cl_req_page_add(env, clerq, page);

	/* always get the data for the obdo for the rpc */
	LASSERT(clerq != NULL);
	crattr->cra_oa = oa;
	cl_req_attr_set(env, clerq, crattr, ~0ULL);
		oa->o_handle = lock->l_remote_handle;
		oa->o_valid |= OBD_MD_FLHANDLE;

	rc = cl_req_prep(env, clerq);
		CERROR("cl_req_prep failed: %d\n", rc);

	sort_brw_pages(pga, page_count);
	rc = osc_brw_prep_request(cmd, cli, oa, NULL, page_count,
		CERROR("prep_req failed: %d\n", rc);

	req->rq_interpret_reply = brw_interpret;
		req->rq_memalloc = 1;

	/* Need to update the timestamps after the request is built in case
	 * we race with setattr (locally or in queue at OST). If OST gets
	 * later setattr before earlier BRW (as determined by the request xid),
	 * the OST will not use BRW timestamps. Sadly, there is no obvious
	 * way to do this in a single call. bug 10150 */
	body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
	crattr->cra_oa = &body->oa;
	cl_req_attr_set(env, clerq, crattr,
			OBD_MD_FLMTIME | OBD_MD_FLCTIME | OBD_MD_FLATIME);
	lustre_msg_set_jobid(req->rq_reqmsg, crattr->cra_jobid);

	CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
	aa = ptlrpc_req_async_args(req);
	INIT_LIST_HEAD(&aa->aa_oaps);
	list_splice_init(&rpc_list, &aa->aa_oaps);
	INIT_LIST_HEAD(&aa->aa_exts);
	list_splice_init(ext_list, &aa->aa_exts);
	aa->aa_clerq = clerq;

	/* queued sync pages can be torn down while the pages
	 * were between the pending list and the rpc */
	list_for_each_entry(oap, &aa->aa_oaps, oap_rpc_item) {
		/* only one oap gets a request reference */
		if (oap->oap_interrupted && !req->rq_intr) {
			CDEBUG(D_INODE, "oap %p in req %p interrupted\n",
			ptlrpc_mark_interrupted(req);
	tmp->oap_request = ptlrpc_request_addref(req);

	client_obd_list_lock(&cli->cl_loi_list_lock);
	starting_offset >>= PAGE_CACHE_SHIFT;
	if (cmd == OBD_BRW_READ) {
		cli->cl_r_in_flight++;
		lprocfs_oh_tally_log2(&cli->cl_read_page_hist, page_count);
		lprocfs_oh_tally(&cli->cl_read_rpc_hist, cli->cl_r_in_flight);
		lprocfs_oh_tally_log2(&cli->cl_read_offset_hist,
				      starting_offset + 1);
		cli->cl_w_in_flight++;
		lprocfs_oh_tally_log2(&cli->cl_write_page_hist, page_count);
		lprocfs_oh_tally(&cli->cl_write_rpc_hist, cli->cl_w_in_flight);
		lprocfs_oh_tally_log2(&cli->cl_write_offset_hist,
				      starting_offset + 1);
	client_obd_list_unlock(&cli->cl_loi_list_lock);

	DEBUG_REQ(D_INODE, req, "%d pages, aa %p. now %dr/%dw in flight",
		  page_count, aa, cli->cl_r_in_flight,
		  cli->cl_w_in_flight);

	ptlrpcd_add_req(req);

	cfs_memory_pressure_restore(mpflag);

	LASSERT(req == NULL);

	kmem_cache_free(obdo_cachep, oa);
	/* this should happen rarely and is pretty bad, it makes the
	 * pending list not follow the dirty order */
	while (!list_empty(ext_list)) {
		ext = list_entry(ext_list->next, struct osc_extent,
		list_del_init(&ext->oe_link);
		osc_extent_finish(env, ext, 0, rc);
	if (clerq && !IS_ERR(clerq))
		cl_req_completion(env, clerq, rc);

static int osc_set_lock_data_with_check(struct ldlm_lock *lock,
					struct ldlm_enqueue_info *einfo)
	void *data = einfo->ei_cbdata;

	LASSERT(lock != NULL);
	LASSERT(lock->l_blocking_ast == einfo->ei_cb_bl);
	LASSERT(lock->l_resource->lr_type == einfo->ei_type);
	LASSERT(lock->l_completion_ast == einfo->ei_cb_cp);
	LASSERT(lock->l_glimpse_ast == einfo->ei_cb_gl);

	lock_res_and_lock(lock);
	spin_lock(&osc_ast_guard);

	if (lock->l_ast_data == NULL)
		lock->l_ast_data = data;
	if (lock->l_ast_data == data)

	spin_unlock(&osc_ast_guard);
	unlock_res_and_lock(lock);

static int osc_set_data_with_check(struct lustre_handle *lockh,
				   struct ldlm_enqueue_info *einfo)
	struct ldlm_lock *lock = ldlm_handle2lock(lockh);

	set = osc_set_lock_data_with_check(lock, einfo);
	LDLM_LOCK_PUT(lock);
		CERROR("lockh %p, data %p - client evicted?\n",
		       lockh, einfo->ei_cbdata);

/* Find any ldlm lock of the inode in osc; returns 0 if none is found, 1 if
 * one is found, and a negative value on error. */
static int osc_find_cbdata(struct obd_export *exp, struct lov_stripe_md *lsm,
			   ldlm_iterator_t replace, void *data)
	struct ldlm_res_id res_id;
	struct obd_device *obd = class_exp2obd(exp);

	ostid_build_res_name(&lsm->lsm_oi, &res_id);
	rc = ldlm_resource_iterate(obd->obd_namespace, &res_id, replace, data);
	if (rc == LDLM_ITER_STOP)
	if (rc == LDLM_ITER_CONTINUE)

static int osc_enqueue_fini(struct ptlrpc_request *req, struct ost_lvb *lvb,
			    obd_enqueue_update_f upcall, void *cookie,
			    __u64 *flags, int agl, int rc)
	int intent = *flags & LDLM_FL_HAS_INTENT;

	/* The request was created before ldlm_cli_enqueue call. */
	if (rc == ELDLM_LOCK_ABORTED) {
		struct ldlm_reply *rep;

		rep = req_capsule_server_get(&req->rq_pill,
		LASSERT(rep != NULL);
		rep->lock_policy_res1 =
			ptlrpc_status_ntoh(rep->lock_policy_res1);
		if (rep->lock_policy_res1)
			rc = rep->lock_policy_res1;

	if ((intent != 0 && rc == ELDLM_LOCK_ABORTED && agl == 0) ||
		*flags |= LDLM_FL_LVB_READY;
		CDEBUG(D_INODE, "got kms %llu blocks %llu mtime %llu\n",
		       lvb->lvb_size, lvb->lvb_blocks, lvb->lvb_mtime);

	/* Call the update callback. */
	rc = (*upcall)(cookie, rc);

static int osc_enqueue_interpret(const struct lu_env *env,
				 struct ptlrpc_request *req,
				 struct osc_enqueue_args *aa, int rc)
	struct ldlm_lock *lock;
	struct lustre_handle handle;
	struct ost_lvb *lvb;
	__u64 *flags = aa->oa_flags;

	/* Make a local copy of the lock handle and mode, because aa->oa_*
	 * might be freed anytime after the lock upcall has been called. */
	lustre_handle_copy(&handle, aa->oa_lockh);
	mode = aa->oa_ei->ei_mode;

	/* ldlm_cli_enqueue is holding a reference on the lock, so it must
	 * be valid. */
	lock = ldlm_handle2lock(&handle);

	/* Take an additional reference so that a blocking AST that
	 * ldlm_cli_enqueue_fini() might post for a failed lock is guaranteed
	 * to arrive after an upcall has been executed by
	 * osc_enqueue_fini(). */
	ldlm_lock_addref(&handle, mode);

	/* Let the CP AST grant the lock first. */
	OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_ENQ_RACE, 1);

	if (aa->oa_agl && rc == ELDLM_LOCK_ABORTED) {
		lvb_len = sizeof(*aa->oa_lvb);
	/* Finish obtaining the lock. */
	rc = ldlm_cli_enqueue_fini(aa->oa_exp, req, aa->oa_ei->ei_type, 1,
				   mode, flags, lvb, lvb_len, &handle, rc);
	/* Complete osc stuff. */
	rc = osc_enqueue_fini(req, aa->oa_lvb, aa->oa_upcall, aa->oa_cookie,
			      flags, aa->oa_agl, rc);

	OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_CANCEL_RACE, 10);

	/* Release the lock for async request. */
	if (lustre_handle_is_used(&handle) && rc == ELDLM_OK)
		/*
		 * Releases a reference taken by ldlm_cli_enqueue(), if it is
		 * not already released by
		 * ldlm_cli_enqueue_fini()->failed_lock_cleanup().
		 */
		ldlm_lock_decref(&handle, mode);

	LASSERTF(lock != NULL, "lockh %p, req %p, aa %p - client evicted?\n",
		 aa->oa_lockh, req, aa);
	ldlm_lock_decref(&handle, mode);
	LDLM_LOCK_PUT(lock);

struct ptlrpc_request_set *PTLRPCD_SET = (void *)1;
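
/*
 * PTLRPCD_SET is a sentinel, not a real set: callers pass it (and the code
 * above compares rqset against it by address) to ask that a request be
 * handed straight to ptlrpcd instead of being added to a caller-owned
 * request set.
 */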

/* When enqueuing asynchronously, locks are not ordered, so we can obtain a
 * lock from the 2nd OSC before a lock from the 1st one. This does not deadlock
 * with other synchronous requests; however, keeping some locks while trying
 * to obtain others may take a considerable amount of time in the case of OST
 * failure, and when a client does not release locks that other sync requests
 * need, that client is excluded from the cluster -- such scenarios make life
 * difficult, so release locks just after they are obtained. */
2188 int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
2189 __u64 *flags, ldlm_policy_data_t *policy,
2190 struct ost_lvb *lvb, int kms_valid,
2191 obd_enqueue_update_f upcall, void *cookie,
2192 struct ldlm_enqueue_info *einfo,
2193 struct lustre_handle *lockh,
2194 struct ptlrpc_request_set *rqset, int async, int agl)
2196 struct obd_device *obd = exp->exp_obd;
2197 struct ptlrpc_request *req = NULL;
2198 int intent = *flags & LDLM_FL_HAS_INTENT;
2199 __u64 match_lvb = (agl != 0 ? 0 : LDLM_FL_LVB_READY);
2203 /* Filesystem lock extents are extended to page boundaries so that
2204 * dealing with the page cache is a little smoother. */
2205 policy->l_extent.start -= policy->l_extent.start & ~CFS_PAGE_MASK;
2206 policy->l_extent.end |= ~CFS_PAGE_MASK;
	/*
	 * kms is not valid when either the object is completely fresh (so
	 * that no locks are cached), or the object was evicted. In the latter
	 * case a cached lock cannot be used, because it would prime the inode
	 * state with a potentially stale LVB.
	 */
2217 /* Next, search for already existing extent locks that will cover us */
2218 /* If we're trying to read, we also search for an existing PW lock. The
2219 * VFS and page cache already protect us locally, so lots of readers/
2220 * writers can share a single PW lock.
	 * There are problems with conversion deadlocks, so instead of
	 * converting a read lock to a write lock, we'll just enqueue a new
	 * one.
	 *
	 * At some point we should cancel the read lock instead of making them
	 * send us a blocking callback, but there are problems with canceling
	 * locks out from other users right now, too. */
2229 mode = einfo->ei_mode;
	if (einfo->ei_mode == LCK_PR)
		mode |= LCK_PW;
2232 mode = ldlm_lock_match(obd->obd_namespace, *flags | match_lvb, res_id,
2233 einfo->ei_type, policy, mode, lockh, 0);
2235 struct ldlm_lock *matched = ldlm_handle2lock(lockh);
2237 if ((agl != 0) && !(matched->l_flags & LDLM_FL_LVB_READY)) {
			/* For AGL, if the enqueue RPC is sent but the lock
			 * is not granted, then skip processing this stripe.
			 * Return -ECANCELED to tell the caller. */
2241 ldlm_lock_decref(lockh, mode);
2242 LDLM_LOCK_PUT(matched);
2246 if (osc_set_lock_data_with_check(matched, einfo)) {
2247 *flags |= LDLM_FL_LVB_READY;
			/* Addref the lock only for non-async requests where
			 * a PW lock was matched although we asked for PR. */
2250 if (!rqset && einfo->ei_mode != mode)
2251 ldlm_lock_addref(lockh, LCK_PR);
2253 /* I would like to be able to ASSERT here that
2254 * rss <= kms, but I can't, for reasons which
2255 * are explained in lov_enqueue() */
2258 /* We already have a lock, and it's referenced.
2260 * At this point, the cl_lock::cll_state is CLS_QUEUING,
2261 * AGL upcall may change it to CLS_HELD directly. */
2262 (*upcall)(cookie, ELDLM_OK);
2264 if (einfo->ei_mode != mode)
				ldlm_lock_decref(lockh, LCK_PW);
			else if (rqset)
				/* For async requests, decref the lock. */
				ldlm_lock_decref(lockh, einfo->ei_mode);
2269 LDLM_LOCK_PUT(matched);
2273 ldlm_lock_decref(lockh, mode);
2274 LDLM_LOCK_PUT(matched);
2281 req = ptlrpc_request_alloc(class_exp2cliimp(exp),
2282 &RQF_LDLM_ENQUEUE_LVB);
2286 rc = ldlm_prep_enqueue_req(exp, req, &cancels, 0);
2288 ptlrpc_request_free(req);
2292 req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER,
2294 ptlrpc_request_set_replen(req);
2297 /* users of osc_enqueue() can pass this flag for ldlm_lock_match() */
2298 *flags &= ~LDLM_FL_BLOCK_GRANTED;
2300 rc = ldlm_cli_enqueue(exp, &req, einfo, res_id, policy, flags, lvb,
2301 sizeof(*lvb), LVB_T_OST, lockh, async);
2304 struct osc_enqueue_args *aa;
		CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
2307 aa = ptlrpc_req_async_args(req);
2310 aa->oa_flags = flags;
2311 aa->oa_upcall = upcall;
2312 aa->oa_cookie = cookie;
2314 aa->oa_lockh = lockh;
2317 req->rq_interpret_reply =
2318 (ptlrpc_interpterer_t)osc_enqueue_interpret;
2319 if (rqset == PTLRPCD_SET)
2320 ptlrpcd_add_req(req);
2322 ptlrpc_set_add_req(rqset, req);
2323 } else if (intent) {
2324 ptlrpc_req_finished(req);
2329 rc = osc_enqueue_fini(req, lvb, upcall, cookie, flags, agl, rc);
2331 ptlrpc_req_finished(req);
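
/* Match an already existing extent lock in the local namespace instead of
 * enqueueing a new one; mirrors the lock-match logic in osc_enqueue_base(). */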
2336 int osc_match_base(struct obd_export *exp, struct ldlm_res_id *res_id,
2337 __u32 type, ldlm_policy_data_t *policy, __u32 mode,
		   __u64 *flags, void *data, struct lustre_handle *lockh,
		   int unref)
2341 struct obd_device *obd = exp->exp_obd;
2342 __u64 lflags = *flags;
2345 if (OBD_FAIL_CHECK(OBD_FAIL_OSC_MATCH))
2348 /* Filesystem lock extents are extended to page boundaries so that
2349 * dealing with the page cache is a little smoother */
2350 policy->l_extent.start -= policy->l_extent.start & ~CFS_PAGE_MASK;
2351 policy->l_extent.end |= ~CFS_PAGE_MASK;
2353 /* Next, search for already existing extent locks that will cover us */
2354 /* If we're trying to read, we also search for an existing PW lock. The
2355 * VFS and page cache already protect us locally, so lots of readers/
2356 * writers can share a single PW lock. */
	rc = mode;
	if (mode == LCK_PR)
		rc |= LCK_PW;
	rc = ldlm_lock_match(obd->obd_namespace, lflags,
2361 res_id, type, policy, rc, lockh, unref);
2364 if (!osc_set_data_with_check(lockh, data)) {
2365 if (!(lflags & LDLM_FL_TEST_LOCK))
2366 ldlm_lock_decref(lockh, rc);
2370 if (!(lflags & LDLM_FL_TEST_LOCK) && mode != rc) {
2371 ldlm_lock_addref(lockh, LCK_PR);
2372 ldlm_lock_decref(lockh, LCK_PW);
2379 int osc_cancel_base(struct lustre_handle *lockh, __u32 mode)
2381 if (unlikely(mode == LCK_GROUP))
		ldlm_lock_decref_and_cancel(lockh, mode);
	else
		ldlm_lock_decref(lockh, mode);
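
/* Release a lock reference; group locks are canceled at the same time, other
 * modes are simply decref'd. */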
2389 static int osc_statfs_interpret(const struct lu_env *env,
2390 struct ptlrpc_request *req,
2391 struct osc_async_args *aa, int rc)
2393 struct obd_statfs *msfs;
	/* The request has in fact never been sent due to issues at a higher
	 * level (LOV). Exit immediately since the caller is aware of the
	 * problem and takes care of the cleanup. */
2403 if ((rc == -ENOTCONN || rc == -EAGAIN) &&
2404 (aa->aa_oi->oi_flags & OBD_STATFS_NODELAY)) {
2412 msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
2418 *aa->aa_oi->oi_osfs = *msfs;
2420 rc = aa->aa_oi->oi_cb_up(aa->aa_oi, rc);
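
/* Send an OST_STATFS request without blocking; the reply is delivered to the
 * caller through osc_statfs_interpret() and the obd_info callback. */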
2424 static int osc_statfs_async(struct obd_export *exp,
2425 struct obd_info *oinfo, __u64 max_age,
2426 struct ptlrpc_request_set *rqset)
2428 struct obd_device *obd = class_exp2obd(exp);
2429 struct ptlrpc_request *req;
2430 struct osc_async_args *aa;
2433 /* We could possibly pass max_age in the request (as an absolute
2434 * timestamp or a "seconds.usec ago") so the target can avoid doing
2435 * extra calls into the filesystem if that isn't necessary (e.g.
2436 * during mount that would help a bit). Having relative timestamps
2437 * is not so great if request processing is slow, while absolute
2438 * timestamps are not ideal because they need time synchronization. */
2439 req = ptlrpc_request_alloc(obd->u.cli.cl_import, &RQF_OST_STATFS);
2443 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS);
2445 ptlrpc_request_free(req);
2448 ptlrpc_request_set_replen(req);
2449 req->rq_request_portal = OST_CREATE_PORTAL;
2450 ptlrpc_at_set_req_timeout(req);
2452 if (oinfo->oi_flags & OBD_STATFS_NODELAY) {
		/* procfs requests should not wait for the statfs reply;
		 * disable resend and delay to avoid deadlock */
2454 req->rq_no_resend = 1;
2455 req->rq_no_delay = 1;
2458 req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_statfs_interpret;
	CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
2460 aa = ptlrpc_req_async_args(req);
2463 ptlrpc_set_add_req(rqset, req);
2467 static int osc_statfs(const struct lu_env *env, struct obd_export *exp,
2468 struct obd_statfs *osfs, __u64 max_age, __u32 flags)
2470 struct obd_device *obd = class_exp2obd(exp);
2471 struct obd_statfs *msfs;
2472 struct ptlrpc_request *req;
2473 struct obd_import *imp = NULL;
	/* Since the request might also come from lprocfs, we need to sync
	 * this with client_disconnect_export() (bug 15684). */
2478 down_read(&obd->u.cli.cl_sem);
2479 if (obd->u.cli.cl_import)
2480 imp = class_import_get(obd->u.cli.cl_import);
2481 up_read(&obd->u.cli.cl_sem);
2485 /* We could possibly pass max_age in the request (as an absolute
2486 * timestamp or a "seconds.usec ago") so the target can avoid doing
2487 * extra calls into the filesystem if that isn't necessary (e.g.
2488 * during mount that would help a bit). Having relative timestamps
2489 * is not so great if request processing is slow, while absolute
2490 * timestamps are not ideal because they need time synchronization. */
2491 req = ptlrpc_request_alloc(imp, &RQF_OST_STATFS);
2493 class_import_put(imp);
2498 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS);
2500 ptlrpc_request_free(req);
2503 ptlrpc_request_set_replen(req);
2504 req->rq_request_portal = OST_CREATE_PORTAL;
2505 ptlrpc_at_set_req_timeout(req);
2507 if (flags & OBD_STATFS_NODELAY) {
		/* procfs requests should not wait for the statfs reply;
		 * disable resend and delay to avoid deadlock */
2509 req->rq_no_resend = 1;
2510 req->rq_no_delay = 1;
2513 rc = ptlrpc_queue_wait(req);
2517 msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
2526 ptlrpc_req_finished(req);
/* Retrieve object striping information.
 *
 * @lump is a pointer to an in-core struct with lmm_ost_count indicating
 * the maximum number of OST indices which will fit in the user buffer.
 * lmm_magic must be LOV_MAGIC (we only use 1 slot here).
 */
2536 static int osc_getstripe(struct lov_stripe_md *lsm, struct lov_user_md *lump)
2538 /* we use lov_user_md_v3 because it is larger than lov_user_md_v1 */
2539 struct lov_user_md_v3 lum, *lumk;
2540 struct lov_user_ost_data_v1 *lmm_objects;
2541 int rc = 0, lum_size;
	/* we only need the header part from user space to get lmm_magic and
	 * lmm_stripe_count (the header part is common to v1 and v3) */
2548 lum_size = sizeof(struct lov_user_md_v1);
2549 if (copy_from_user(&lum, lump, lum_size))
2552 if ((lum.lmm_magic != LOV_USER_MAGIC_V1) &&
2553 (lum.lmm_magic != LOV_USER_MAGIC_V3))
2556 /* lov_user_md_vX and lov_mds_md_vX must have the same size */
2557 LASSERT(sizeof(struct lov_user_md_v1) == sizeof(struct lov_mds_md_v1));
2558 LASSERT(sizeof(struct lov_user_md_v3) == sizeof(struct lov_mds_md_v3));
2559 LASSERT(sizeof(lum.lmm_objects[0]) == sizeof(lumk->lmm_objects[0]));
2561 /* we can use lov_mds_md_size() to compute lum_size
2562 * because lov_user_md_vX and lov_mds_md_vX have the same size */
2563 if (lum.lmm_stripe_count > 0) {
2564 lum_size = lov_mds_md_size(lum.lmm_stripe_count, lum.lmm_magic);
2565 lumk = kzalloc(lum_size, GFP_NOFS);
2569 if (lum.lmm_magic == LOV_USER_MAGIC_V1)
			lmm_objects =
			    &(((struct lov_user_md_v1 *)lumk)->lmm_objects[0]);
		else
			lmm_objects = &(lumk->lmm_objects[0]);
2574 lmm_objects->l_ost_oi = lsm->lsm_oi;
2576 lum_size = lov_mds_md_size(0, lum.lmm_magic);
2580 lumk->lmm_oi = lsm->lsm_oi;
2581 lumk->lmm_stripe_count = 1;
2583 if (copy_to_user(lump, lumk, lum_size))
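
/* Dispatch OSC-specific ioctls. */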
2592 static int osc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
2593 void *karg, void *uarg)
2595 struct obd_device *obd = exp->exp_obd;
2596 struct obd_ioctl_data *data = karg;
2599 if (!try_module_get(THIS_MODULE)) {
		CERROR("Can't get module. Is it alive?\n");
2604 case OBD_IOC_LOV_GET_CONFIG: {
2606 struct lov_desc *desc;
2607 struct obd_uuid uuid;
2611 if (obd_ioctl_getdata(&buf, &len, uarg)) {
2616 data = (struct obd_ioctl_data *)buf;
2618 if (sizeof(*desc) > data->ioc_inllen1) {
2619 obd_ioctl_freedata(buf, len);
2624 if (data->ioc_inllen2 < sizeof(uuid)) {
2625 obd_ioctl_freedata(buf, len);
2630 desc = (struct lov_desc *)data->ioc_inlbuf1;
2631 desc->ld_tgt_count = 1;
2632 desc->ld_active_tgt_count = 1;
2633 desc->ld_default_stripe_count = 1;
2634 desc->ld_default_stripe_size = 0;
2635 desc->ld_default_stripe_offset = 0;
2636 desc->ld_pattern = 0;
2637 memcpy(&desc->ld_uuid, &obd->obd_uuid, sizeof(uuid));
2639 memcpy(data->ioc_inlbuf2, &obd->obd_uuid, sizeof(uuid));
2641 err = copy_to_user(uarg, buf, len);
2644 obd_ioctl_freedata(buf, len);
2647 case LL_IOC_LOV_SETSTRIPE:
2648 err = obd_alloc_memmd(exp, karg);
2652 case LL_IOC_LOV_GETSTRIPE:
2653 err = osc_getstripe(karg, uarg);
2655 case OBD_IOC_CLIENT_RECOVER:
2656 err = ptlrpc_recover_import(obd->u.cli.cl_import,
2657 data->ioc_inlbuf1, 0);
2661 case IOC_OSC_SET_ACTIVE:
		err = ptlrpc_set_import_active(obd->u.cli.cl_import,
					       data->ioc_offset);
2665 case OBD_IOC_POLL_QUOTACHECK:
2666 err = osc_quota_poll_check(exp, karg);
2668 case OBD_IOC_PING_TARGET:
2669 err = ptlrpc_obd_ping(obd);
2672 CDEBUG(D_INODE, "unrecognised ioctl %#x by %s\n",
2673 cmd, current_comm());
2678 module_put(THIS_MODULE);
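
/* obd_get_info handler: KEY_LOCK_TO_STRIPE is answered locally, while
 * KEY_LAST_ID and KEY_FIEMAP are forwarded to the OST via OST_GET_INFO. */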
2682 static int osc_get_info(const struct lu_env *env, struct obd_export *exp,
2683 u32 keylen, void *key, __u32 *vallen, void *val,
2684 struct lov_stripe_md *lsm)
2686 if (!vallen || !val)
2689 if (KEY_IS(KEY_LOCK_TO_STRIPE)) {
2690 __u32 *stripe = val;
2691 *vallen = sizeof(*stripe);
2694 } else if (KEY_IS(KEY_LAST_ID)) {
2695 struct ptlrpc_request *req;
2700 req = ptlrpc_request_alloc(class_exp2cliimp(exp),
2701 &RQF_OST_GET_INFO_LAST_ID);
2705 req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_KEY,
2706 RCL_CLIENT, keylen);
2707 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GET_INFO);
2709 ptlrpc_request_free(req);
2713 tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
2714 memcpy(tmp, key, keylen);
2716 req->rq_no_delay = req->rq_no_resend = 1;
2717 ptlrpc_request_set_replen(req);
2718 rc = ptlrpc_queue_wait(req);
2722 reply = req_capsule_server_get(&req->rq_pill, &RMF_OBD_ID);
2723 if (reply == NULL) {
2728 *((u64 *)val) = *reply;
2730 ptlrpc_req_finished(req);
2732 } else if (KEY_IS(KEY_FIEMAP)) {
2733 struct ll_fiemap_info_key *fm_key = key;
2734 struct ldlm_res_id res_id;
2735 ldlm_policy_data_t policy;
2736 struct lustre_handle lockh;
2737 ldlm_mode_t mode = 0;
2738 struct ptlrpc_request *req;
2739 struct ll_user_fiemap *reply;
2743 if (!(fm_key->fiemap.fm_flags & FIEMAP_FLAG_SYNC))
		policy.l_extent.start = fm_key->fiemap.fm_start &
					CFS_PAGE_MASK;
2749 if (OBD_OBJECT_EOF - fm_key->fiemap.fm_length <=
2750 fm_key->fiemap.fm_start + PAGE_CACHE_SIZE - 1)
2751 policy.l_extent.end = OBD_OBJECT_EOF;
2753 policy.l_extent.end = (fm_key->fiemap.fm_start +
2754 fm_key->fiemap.fm_length +
2755 PAGE_CACHE_SIZE - 1) & CFS_PAGE_MASK;
2757 ostid_build_res_name(&fm_key->oa.o_oi, &res_id);
2758 mode = ldlm_lock_match(exp->exp_obd->obd_namespace,
2759 LDLM_FL_BLOCK_GRANTED |
2761 &res_id, LDLM_EXTENT, &policy,
2762 LCK_PR | LCK_PW, &lockh, 0);
2763 if (mode) { /* lock is cached on client */
2764 if (mode != LCK_PR) {
2765 ldlm_lock_addref(&lockh, LCK_PR);
2766 ldlm_lock_decref(&lockh, LCK_PW);
		} else { /* no cached lock, need to acquire it on the server side */
2769 fm_key->oa.o_valid |= OBD_MD_FLFLAGS;
2770 fm_key->oa.o_flags |= OBD_FL_SRVLOCK;
2774 req = ptlrpc_request_alloc(class_exp2cliimp(exp),
2775 &RQF_OST_GET_INFO_FIEMAP);
2781 req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_KEY,
2782 RCL_CLIENT, keylen);
2783 req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_VAL,
2784 RCL_CLIENT, *vallen);
2785 req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_VAL,
2786 RCL_SERVER, *vallen);
2788 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GET_INFO);
2790 ptlrpc_request_free(req);
2794 tmp = req_capsule_client_get(&req->rq_pill, &RMF_FIEMAP_KEY);
2795 memcpy(tmp, key, keylen);
2796 tmp = req_capsule_client_get(&req->rq_pill, &RMF_FIEMAP_VAL);
2797 memcpy(tmp, val, *vallen);
2799 ptlrpc_request_set_replen(req);
2800 rc = ptlrpc_queue_wait(req);
2804 reply = req_capsule_server_get(&req->rq_pill, &RMF_FIEMAP_VAL);
2805 if (reply == NULL) {
2810 memcpy(val, reply, *vallen);
2812 ptlrpc_req_finished(req);
2815 ldlm_lock_decref(&lockh, LCK_PR);
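
/* obd_set_info_async handler: checksum, sptlrpc and cache keys are handled
 * locally; other keys are packed into an OST_SET_INFO RPC, with grant-shrink
 * requests going to ptlrpcd and everything else onto the caller's set. */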
2822 static int osc_set_info_async(const struct lu_env *env, struct obd_export *exp,
2823 u32 keylen, void *key, u32 vallen,
2824 void *val, struct ptlrpc_request_set *set)
2826 struct ptlrpc_request *req;
2827 struct obd_device *obd = exp->exp_obd;
2828 struct obd_import *imp = class_exp2cliimp(exp);
2832 OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_SHUTDOWN, 10);
2834 if (KEY_IS(KEY_CHECKSUM)) {
2835 if (vallen != sizeof(int))
2837 exp->exp_obd->u.cli.cl_checksum = (*(int *)val) ? 1 : 0;
2841 if (KEY_IS(KEY_SPTLRPC_CONF)) {
2842 sptlrpc_conf_client_adapt(obd);
2846 if (KEY_IS(KEY_FLUSH_CTX)) {
2847 sptlrpc_import_flush_my_ctx(imp);
2851 if (KEY_IS(KEY_CACHE_SET)) {
2852 struct client_obd *cli = &obd->u.cli;
2854 LASSERT(cli->cl_cache == NULL); /* only once */
2855 cli->cl_cache = val;
2856 atomic_inc(&cli->cl_cache->ccc_users);
2857 cli->cl_lru_left = &cli->cl_cache->ccc_lru_left;
2859 /* add this osc into entity list */
2860 LASSERT(list_empty(&cli->cl_lru_osc));
2861 spin_lock(&cli->cl_cache->ccc_lru_lock);
2862 list_add(&cli->cl_lru_osc, &cli->cl_cache->ccc_lru);
2863 spin_unlock(&cli->cl_cache->ccc_lru_lock);
2868 if (KEY_IS(KEY_CACHE_LRU_SHRINK)) {
2869 struct client_obd *cli = &obd->u.cli;
2870 int nr = atomic_read(&cli->cl_lru_in_list) >> 1;
2871 int target = *(int *)val;
2873 nr = osc_lru_shrink(cli, min(nr, target));
2878 if (!set && !KEY_IS(KEY_GRANT_SHRINK))
	/* We pass all other commands directly to OST. Since nobody calls osc
	 * methods directly and everybody is supposed to go through LOV, we
	 * assume lov checked invalid values for us.
	 * The only recognised values so far are evict_by_nid and mds_conn.
	 * Even if something bad goes through, we'd get a -EINVAL from OST
	 * anyway. */
2888 req = ptlrpc_request_alloc(imp, KEY_IS(KEY_GRANT_SHRINK) ?
				   &RQF_OST_SET_GRANT_INFO :
				   &RQF_OBD_SET_INFO);
2894 req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_KEY,
2895 RCL_CLIENT, keylen);
2896 if (!KEY_IS(KEY_GRANT_SHRINK))
2897 req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_VAL,
2898 RCL_CLIENT, vallen);
2899 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SET_INFO);
2901 ptlrpc_request_free(req);
2905 tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
2906 memcpy(tmp, key, keylen);
	tmp = req_capsule_client_get(&req->rq_pill, KEY_IS(KEY_GRANT_SHRINK) ?
				     &RMF_OST_BODY :
				     &RMF_SETINFO_VAL);
2910 memcpy(tmp, val, vallen);
2912 if (KEY_IS(KEY_GRANT_SHRINK)) {
2913 struct osc_brw_async_args *aa;
2916 CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
2917 aa = ptlrpc_req_async_args(req);
2918 oa = kmem_cache_alloc(obdo_cachep, GFP_NOFS | __GFP_ZERO);
2920 ptlrpc_req_finished(req);
2923 *oa = ((struct ost_body *)val)->oa;
2925 req->rq_interpret_reply = osc_shrink_grant_interpret;
2928 ptlrpc_request_set_replen(req);
2929 if (!KEY_IS(KEY_GRANT_SHRINK)) {
2930 LASSERT(set != NULL);
2931 ptlrpc_set_add_req(set, req);
2932 ptlrpc_check_set(NULL, set);
2934 ptlrpcd_add_req(req);
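
/* On reconnect, request a grant covering what the client already holds plus
 * its dirty pages, or two RPCs worth of grant by default. */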
2940 static int osc_reconnect(const struct lu_env *env,
2941 struct obd_export *exp, struct obd_device *obd,
2942 struct obd_uuid *cluuid,
2943 struct obd_connect_data *data,
2946 struct client_obd *cli = &obd->u.cli;
2948 if (data != NULL && (data->ocd_connect_flags & OBD_CONNECT_GRANT)) {
2951 client_obd_list_lock(&cli->cl_loi_list_lock);
2952 data->ocd_grant = (cli->cl_avail_grant + cli->cl_dirty) ?:
2953 2 * cli_brw_size(obd);
2954 lost_grant = cli->cl_lost_grant;
2955 cli->cl_lost_grant = 0;
2956 client_obd_list_unlock(&cli->cl_loi_list_lock);
2958 CDEBUG(D_RPCTRACE, "ocd_connect_flags: %#llx ocd_version: %d ocd_grant: %d, lost: %ld.\n",
2959 data->ocd_connect_flags,
2960 data->ocd_version, data->ocd_grant, lost_grant);
2966 static int osc_disconnect(struct obd_export *exp)
2968 struct obd_device *obd = class_exp2obd(exp);
2971 rc = client_disconnect_export(exp);
	/*
	 * Initially we put del_shrink_grant before disconnect_export, but it
	 * causes the following problem if setup (connect) and cleanup
	 * (disconnect) are tangled together.
	 *	connect p1		     disconnect p2
	 *   ptlrpc_connect_import
	 *     ...............		   class_manual_cleanup
	 *				     osc_disconnect
	 *				     del_shrink_grant
	 *   ptlrpc_connect_interrupt
	 *     init_grant_shrink
	 *   add this client to shrink list
	 *				      cleanup_osc
	 * Bang! The pinger triggers the shrink.
	 * So the osc should be disconnected from the shrink list only after
	 * we are sure the import has been destroyed (bug 18662).
	 */
2989 if (obd->u.cli.cl_import == NULL)
2990 osc_del_shrink_grant(&obd->u.cli);
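
/* React to import state changes: discard grants on disconnect, flush pages
 * and clean up the namespace on invalidation, and re-initialize grant and
 * portal settings when new connect data (OCD) arrives. */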
2994 static int osc_import_event(struct obd_device *obd,
2995 struct obd_import *imp,
2996 enum obd_import_event event)
2998 struct client_obd *cli;
3001 LASSERT(imp->imp_obd == obd);
3004 case IMP_EVENT_DISCON: {
3006 client_obd_list_lock(&cli->cl_loi_list_lock);
3007 cli->cl_avail_grant = 0;
3008 cli->cl_lost_grant = 0;
3009 client_obd_list_unlock(&cli->cl_loi_list_lock);
3012 case IMP_EVENT_INACTIVE: {
3013 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_INACTIVE, NULL);
3016 case IMP_EVENT_INVALIDATE: {
3017 struct ldlm_namespace *ns = obd->obd_namespace;
3021 env = cl_env_get(&refcheck);
		/* all pages go to failing rpcs due to the invalid
		 * import */
3027 osc_io_unplug(env, cli, NULL);
3029 ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY);
3030 cl_env_put(env, &refcheck);
3035 case IMP_EVENT_ACTIVE: {
3036 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_ACTIVE, NULL);
3039 case IMP_EVENT_OCD: {
3040 struct obd_connect_data *ocd = &imp->imp_connect_data;
3042 if (ocd->ocd_connect_flags & OBD_CONNECT_GRANT)
3043 osc_init_grant(&obd->u.cli, ocd);
3046 if (ocd->ocd_connect_flags & OBD_CONNECT_REQPORTAL)
3047 imp->imp_client->cli_request_portal = OST_REQUEST_PORTAL;
3049 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_OCD, NULL);
3052 case IMP_EVENT_DEACTIVATE: {
3053 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_DEACTIVATE, NULL);
3056 case IMP_EVENT_ACTIVATE: {
3057 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_ACTIVATE, NULL);
3061 CERROR("Unknown import event %d\n", event);
3068 * Determine whether the lock can be canceled before replaying the lock
3069 * during recovery, see bug16774 for detailed information.
3071 * \retval zero the lock can't be canceled
3072 * \retval other ok to cancel
3074 static int osc_cancel_for_recovery(struct ldlm_lock *lock)
3076 check_res_locked(lock->l_resource);
	/*
	 * Cancel all unused extent locks granted in LCK_PR or LCK_CR mode.
	 *
	 * XXX: as a future improvement, we could also cancel unused write
	 * locks that have no dirty data and no active mmaps.
	 */
	if (lock->l_resource->lr_type == LDLM_EXTENT &&
	    (lock->l_granted_mode == LCK_PR ||
	     lock->l_granted_mode == LCK_CR) &&
	    (osc_dlm_lock_pageref(lock) == 0))
		return 1;

	return 0;
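
/* Writeback work callback, run from a ptlrpcd work item (see osc_setup()) to
 * unplug queued I/O for this client. */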
3093 static int brw_queue_work(const struct lu_env *env, void *data)
3095 struct client_obd *cli = data;
3097 CDEBUG(D_CACHE, "Run writeback work for client obd %p.\n", cli);
3099 osc_io_unplug(env, cli, NULL);
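
/* Set up an OSC device: generic client setup, a writeback work item, quota
 * state, procfs entries, and a contribution to the shared request pool. */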
3103 int osc_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
3105 struct lprocfs_static_vars lvars = { NULL };
3106 struct client_obd *cli = &obd->u.cli;
3113 rc = ptlrpcd_addref();
3117 rc = client_obd_setup(obd, lcfg);
3121 handler = ptlrpcd_alloc_work(cli->cl_import, brw_queue_work, cli);
3122 if (IS_ERR(handler)) {
3123 rc = PTR_ERR(handler);
3124 goto out_client_setup;
3126 cli->cl_writeback_work = handler;
3128 rc = osc_quota_setup(obd);
3130 goto out_ptlrpcd_work;
3132 cli->cl_grant_shrink_interval = GRANT_SHRINK_INTERVAL;
3133 lprocfs_osc_init_vars(&lvars);
3134 if (lprocfs_obd_setup(obd, lvars.obd_vars, lvars.sysfs_vars) == 0) {
3135 lproc_osc_attach_seqstat(obd);
3136 sptlrpc_lprocfs_cliobd_attach(obd);
3137 ptlrpc_lprocfs_register_obd(obd);
	/*
	 * We try to control the total number of requests with an upper limit,
	 * osc_reqpool_maxreqcount. A race may occasionally push the
	 * allocation over the limit, but that is acceptable.
	 */
3145 req_count = atomic_read(&osc_pool_req_count);
3146 if (req_count < osc_reqpool_maxreqcount) {
3147 adding = cli->cl_max_rpcs_in_flight + 2;
3148 if (req_count + adding > osc_reqpool_maxreqcount)
3149 adding = osc_reqpool_maxreqcount - req_count;
3151 added = ptlrpc_add_rqs_to_pool(osc_rq_pool, adding);
3152 atomic_add(added, &osc_pool_req_count);
3155 INIT_LIST_HEAD(&cli->cl_grant_shrink_list);
3156 ns_register_cancel(obd->obd_namespace, osc_cancel_for_recovery);
3160 ptlrpcd_destroy_work(handler);
3162 client_obd_cleanup(obd);
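
/* Staged pre-cleanup: deactivate the import early, then tear down the
 * writeback work item and procfs state once exports are being cleaned up. */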
3168 static int osc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
3171 case OBD_CLEANUP_EARLY: {
3172 struct obd_import *imp;
3174 imp = obd->u.cli.cl_import;
3175 CDEBUG(D_HA, "Deactivating import %s\n", obd->obd_name);
3176 /* ptlrpc_abort_inflight to stop an mds_lov_synchronize */
3177 ptlrpc_deactivate_import(imp);
3178 spin_lock(&imp->imp_lock);
3179 imp->imp_pingable = 0;
3180 spin_unlock(&imp->imp_lock);
3183 case OBD_CLEANUP_EXPORTS: {
3184 struct client_obd *cli = &obd->u.cli;
		/*
		 * For the echo client, the export may be on the zombie list;
		 * wait for the zombie thread to cull it, because cli.cl_import
		 * will be cleared in client_disconnect_export():
		 *   class_export_destroy() -> obd_cleanup() ->
		 *   echo_device_free() -> echo_client_cleanup() ->
		 *   obd_disconnect() -> osc_disconnect() ->
		 *   client_disconnect_export()
		 */
3194 obd_zombie_barrier();
3195 if (cli->cl_writeback_work) {
3196 ptlrpcd_destroy_work(cli->cl_writeback_work);
3197 cli->cl_writeback_work = NULL;
3199 obd_cleanup_client_import(obd);
3200 ptlrpc_lprocfs_unregister_obd(obd);
3201 lprocfs_obd_cleanup(obd);
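
/* Final cleanup: detach this OSC from the shared client cache LRU and free
 * the quota state before the generic client teardown. */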
3208 int osc_cleanup(struct obd_device *obd)
3210 struct client_obd *cli = &obd->u.cli;
3214 if (cli->cl_cache != NULL) {
3215 LASSERT(atomic_read(&cli->cl_cache->ccc_users) > 0);
3216 spin_lock(&cli->cl_cache->ccc_lru_lock);
3217 list_del_init(&cli->cl_lru_osc);
3218 spin_unlock(&cli->cl_cache->ccc_lru_lock);
3219 cli->cl_lru_left = NULL;
3220 atomic_dec(&cli->cl_cache->ccc_users);
3221 cli->cl_cache = NULL;
3224 /* free memory of osc quota cache */
3225 osc_quota_cleanup(obd);
3227 rc = client_obd_cleanup(obd);
3233 int osc_process_config_base(struct obd_device *obd, struct lustre_cfg *lcfg)
3235 struct lprocfs_static_vars lvars = { NULL };
3238 lprocfs_osc_init_vars(&lvars);
3240 switch (lcfg->lcfg_command) {
3242 rc = class_process_proc_param(PARAM_OSC, lvars.obd_vars,
3252 static int osc_process_config(struct obd_device *obd, u32 len, void *buf)
3254 return osc_process_config_base(obd, buf);
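
/* Method table for the OSC obd device type. */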
3257 struct obd_ops osc_obd_ops = {
3258 .o_owner = THIS_MODULE,
3259 .o_setup = osc_setup,
3260 .o_precleanup = osc_precleanup,
3261 .o_cleanup = osc_cleanup,
3262 .o_add_conn = client_import_add_conn,
3263 .o_del_conn = client_import_del_conn,
3264 .o_connect = client_connect_import,
3265 .o_reconnect = osc_reconnect,
3266 .o_disconnect = osc_disconnect,
3267 .o_statfs = osc_statfs,
3268 .o_statfs_async = osc_statfs_async,
3269 .o_packmd = osc_packmd,
3270 .o_unpackmd = osc_unpackmd,
3271 .o_create = osc_create,
3272 .o_destroy = osc_destroy,
3273 .o_getattr = osc_getattr,
3274 .o_getattr_async = osc_getattr_async,
3275 .o_setattr = osc_setattr,
3276 .o_setattr_async = osc_setattr_async,
3277 .o_find_cbdata = osc_find_cbdata,
3278 .o_iocontrol = osc_iocontrol,
3279 .o_get_info = osc_get_info,
3280 .o_set_info_async = osc_set_info_async,
3281 .o_import_event = osc_import_event,
3282 .o_process_config = osc_process_config,
3283 .o_quotactl = osc_quotactl,
3284 .o_quotacheck = osc_quotacheck,
3287 extern struct lu_kmem_descr osc_caches[];
3288 extern spinlock_t osc_ast_guard;
3289 extern struct lock_class_key osc_ast_guard_class;
3291 static int __init osc_init(void)
3293 struct lprocfs_static_vars lvars = { NULL };
3294 unsigned int reqpool_size;
3295 unsigned int reqsize;
	/* Print the address of _any_ initialized kernel symbol from this
	 * module, to allow debugging with a gdb that doesn't support data
	 * symbols from modules. */
3301 CDEBUG(D_INFO, "Lustre OSC module (%p).\n", &osc_caches);
3303 rc = lu_kmem_init(osc_caches);
3307 lprocfs_osc_init_vars(&lvars);
3309 rc = class_register_type(&osc_obd_ops, NULL,
3310 LUSTRE_OSC_NAME, &osc_device_type);
3314 spin_lock_init(&osc_ast_guard);
3315 lockdep_set_class(&osc_ast_guard, &osc_ast_guard_class);
	/* This is obviously too much memory; only prevent overflow here. */
3318 if (osc_reqpool_mem_max >= 1 << 12 || osc_reqpool_mem_max == 0) {
3323 reqpool_size = osc_reqpool_mem_max << 20;
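
	/* Round the per-request buffer size up to the smallest power of two
	 * that can hold OST_MAXREQSIZE. */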
	reqsize = 1;
	while (reqsize < OST_MAXREQSIZE)
		reqsize = reqsize << 1;
	/*
	 * We don't enlarge the request count in the OSC pool according to
	 * cl_max_rpcs_in_flight. Allocation from the pool is only tried after
	 * a normal allocation has failed, so a small OSC pool won't cause
	 * much performance degradation in most cases.
	 */
3335 osc_reqpool_maxreqcount = reqpool_size / reqsize;
3337 atomic_set(&osc_pool_req_count, 0);
3338 osc_rq_pool = ptlrpc_init_rq_pool(0, OST_MAXREQSIZE,
3339 ptlrpc_add_rqs_to_pool);
3347 class_unregister_type(LUSTRE_OSC_NAME);
3349 lu_kmem_fini(osc_caches);
3353 static void /*__exit*/ osc_exit(void)
3355 class_unregister_type(LUSTRE_OSC_NAME);
3356 lu_kmem_fini(osc_caches);
3357 ptlrpc_free_rq_pool(osc_rq_pool);
3360 MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
3361 MODULE_DESCRIPTION("Lustre Object Storage Client (OSC)");
3362 MODULE_LICENSE("GPL");
3363 MODULE_VERSION(LUSTRE_VERSION_STRING);
3365 module_init(osc_init);
3366 module_exit(osc_exit);