2 * Module for pnfs flexfile layout driver.
4 * Copyright (c) 2014, Primary Data, Inc. All rights reserved.
6 * Tao Peng <bergwolf@primarydata.com>
9 #include <linux/nfs_fs.h>
10 #include <linux/nfs_page.h>
11 #include <linux/module.h>
12 #include <linux/sched/mm.h>
14 #include <linux/sunrpc/metrics.h>
16 #include "flexfilelayout.h"
17 #include "../nfs4session.h"
18 #include "../nfs4idmap.h"
19 #include "../internal.h"
20 #include "../delegation.h"
21 #include "../nfs4trace.h"
22 #include "../iostat.h"
26 #define NFSDBG_FACILITY NFSDBG_PNFS_LD
28 #define FF_LAYOUT_POLL_RETRY_MAX (15*HZ)
29 #define FF_LAYOUTRETURN_MAXERR 20
31 static void ff_layout_read_record_layoutstats_done(struct rpc_task *task,
32 struct nfs_pgio_header *hdr);
33 static int ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo,
34 struct nfs42_layoutstat_devinfo *devinfo,
36 static void ff_layout_encode_ff_layoutupdate(struct xdr_stream *xdr,
37 const struct nfs42_layoutstat_devinfo *devinfo,
38 struct nfs4_ff_layout_mirror *mirror);
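/*
 * Allocate the per-inode flexfile layout header and initialise its DS error
 * list, its list of known mirrors, and the layoutstats reporting timestamp.
 */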
40 static struct pnfs_layout_hdr *
41 ff_layout_alloc_layout_hdr(struct inode *inode, gfp_t gfp_flags)
43 struct nfs4_flexfile_layout *ffl;
45 ffl = kzalloc(sizeof(*ffl), gfp_flags);
47 INIT_LIST_HEAD(&ffl->error_list);
48 INIT_LIST_HEAD(&ffl->mirrors);
49 ffl->last_report_time = ktime_get();
50 return &ffl->generic_hdr;
56 ff_layout_free_layout_hdr(struct pnfs_layout_hdr *lo)
58 struct nfs4_ff_layout_ds_err *err, *n;
60 list_for_each_entry_safe(err, n, &FF_LAYOUT_FROM_HDR(lo)->error_list,
65 kfree(FF_LAYOUT_FROM_HDR(lo));
68 static int decode_pnfs_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
72 p = xdr_inline_decode(xdr, NFS4_STATEID_SIZE);
73 if (unlikely(p == NULL))
75 stateid->type = NFS4_PNFS_DS_STATEID_TYPE;
76 memcpy(stateid->data, p, NFS4_STATEID_SIZE);
77 dprintk("%s: stateid id= [%x%x%x%x]\n", __func__,
78 p[0], p[1], p[2], p[3]);
82 static int decode_deviceid(struct xdr_stream *xdr, struct nfs4_deviceid *devid)
86 p = xdr_inline_decode(xdr, NFS4_DEVICEID4_SIZE);
89 memcpy(devid, p, NFS4_DEVICEID4_SIZE);
90 nfs4_print_deviceid(devid);
94 static int decode_nfs_fh(struct xdr_stream *xdr, struct nfs_fh *fh)
98 p = xdr_inline_decode(xdr, 4);
101 fh->size = be32_to_cpup(p++);
102 if (fh->size > sizeof(struct nfs_fh)) {
103 printk(KERN_ERR "NFS flexfiles: Too big fh received %d\n",
108 p = xdr_inline_decode(xdr, fh->size);
111 memcpy(&fh->data, p, fh->size);
112 dprintk("%s: fh len %d\n", __func__, fh->size);
118 * Currently only stringified uids and gids are accepted.
119 * I.e., Kerberos is not supported to the DSes, so no principals.
121 * That means that one common function will suffice, but when
122 * principals are added, this should be split to accommodate
123 * calls to both nfs_map_name_to_uid() and nfs_map_group_to_gid().
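 *
 * For illustration only: a server that encodes the owner as the opaque
 * string "1000" will have that string converted by
 * nfs_map_string_to_numeric() into the numeric id 1000, which the caller
 * then turns into a kuid/kgid via make_kuid()/make_kgid().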
126 decode_name(struct xdr_stream *xdr, u32 *id)
131 /* opaque_length(4) */
132 p = xdr_inline_decode(xdr, 4);
135 len = be32_to_cpup(p++);
139 dprintk("%s: len %u\n", __func__, len);
142 p = xdr_inline_decode(xdr, len);
146 if (!nfs_map_string_to_numeric((char *)p, len, id))
152 static bool ff_mirror_match_fh(const struct nfs4_ff_layout_mirror *m1,
153 const struct nfs4_ff_layout_mirror *m2)
157 if (m1->fh_versions_cnt != m2->fh_versions_cnt)
159 for (i = 0; i < m1->fh_versions_cnt; i++) {
160 bool found_fh = false;
161 for (j = 0; j < m2->fh_versions_cnt; j++) {
162 if (nfs_compare_fh(&m1->fh_versions[i],
163 &m2->fh_versions[j]) == 0) {
174 static struct nfs4_ff_layout_mirror *
175 ff_layout_add_mirror(struct pnfs_layout_hdr *lo,
176 struct nfs4_ff_layout_mirror *mirror)
178 struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(lo);
179 struct nfs4_ff_layout_mirror *pos;
180 struct inode *inode = lo->plh_inode;
182 spin_lock(&inode->i_lock);
183 list_for_each_entry(pos, &ff_layout->mirrors, mirrors) {
184 if (memcmp(&mirror->devid, &pos->devid, sizeof(pos->devid)) != 0)
186 if (!ff_mirror_match_fh(mirror, pos))
188 if (refcount_inc_not_zero(&pos->ref)) {
189 spin_unlock(&inode->i_lock);
193 list_add(&mirror->mirrors, &ff_layout->mirrors);
195 spin_unlock(&inode->i_lock);
200 ff_layout_remove_mirror(struct nfs4_ff_layout_mirror *mirror)
203 if (mirror->layout == NULL)
205 inode = mirror->layout->plh_inode;
206 spin_lock(&inode->i_lock);
207 list_del(&mirror->mirrors);
208 spin_unlock(&inode->i_lock);
209 mirror->layout = NULL;
212 static struct nfs4_ff_layout_mirror *ff_layout_alloc_mirror(gfp_t gfp_flags)
214 struct nfs4_ff_layout_mirror *mirror;
216 mirror = kzalloc(sizeof(*mirror), gfp_flags);
217 if (mirror != NULL) {
218 spin_lock_init(&mirror->lock);
219 refcount_set(&mirror->ref, 1);
220 INIT_LIST_HEAD(&mirror->mirrors);
225 static void ff_layout_free_mirror(struct nfs4_ff_layout_mirror *mirror)
227 const struct cred *cred;
229 ff_layout_remove_mirror(mirror);
230 kfree(mirror->fh_versions);
231 cred = rcu_access_pointer(mirror->ro_cred);
233 cred = rcu_access_pointer(mirror->rw_cred);
235 nfs4_ff_layout_put_deviceid(mirror->mirror_ds);
239 static void ff_layout_put_mirror(struct nfs4_ff_layout_mirror *mirror)
241 if (mirror != NULL && refcount_dec_and_test(&mirror->ref))
242 ff_layout_free_mirror(mirror);
245 static void ff_layout_free_mirror_array(struct nfs4_ff_layout_segment *fls)
249 if (fls->mirror_array) {
250 for (i = 0; i < fls->mirror_array_cnt; i++) {
251 /* normally mirror_ds is freed in
252 * .free_deviceid_node but we still do it here
253 * for .alloc_lseg error path */
254 ff_layout_put_mirror(fls->mirror_array[i]);
256 kfree(fls->mirror_array);
257 fls->mirror_array = NULL;
261 static int ff_layout_check_layout(struct nfs4_layoutget_res *lgr)
265 dprintk("--> %s\n", __func__);
267 /* FIXME: remove this check when layout segment support is added */
268 if (lgr->range.offset != 0 ||
269 lgr->range.length != NFS4_MAX_UINT64) {
270 dprintk("%s Only whole file layouts supported. Use MDS i/o\n",
275 dprintk("--> %s returns %d\n", __func__, ret);
279 static void _ff_layout_free_lseg(struct nfs4_ff_layout_segment *fls)
282 ff_layout_free_mirror_array(fls);
288 ff_lseg_range_is_after(const struct pnfs_layout_range *l1,
289 const struct pnfs_layout_range *l2)
293 if (l1->iomode != l2->iomode)
294 return l1->iomode != IOMODE_READ;
295 end1 = pnfs_calc_offset_end(l1->offset, l1->length);
296 end2 = pnfs_calc_offset_end(l2->offset, l2->length);
297 if (end1 < l2->offset)
299 if (end2 < l1->offset)
301 return l2->offset <= l1->offset;
305 ff_lseg_merge(struct pnfs_layout_segment *new,
306 struct pnfs_layout_segment *old)
308 u64 new_end, old_end;
310 if (test_bit(NFS_LSEG_LAYOUTRETURN, &old->pls_flags))
312 if (new->pls_range.iomode != old->pls_range.iomode)
314 old_end = pnfs_calc_offset_end(old->pls_range.offset,
315 old->pls_range.length);
316 if (old_end < new->pls_range.offset)
318 new_end = pnfs_calc_offset_end(new->pls_range.offset,
319 new->pls_range.length);
320 if (new_end < old->pls_range.offset)
323 /* Mergeable: copy info from 'old' to 'new' */
324 if (new_end < old_end)
326 if (new->pls_range.offset < old->pls_range.offset)
327 new->pls_range.offset = old->pls_range.offset;
328 new->pls_range.length = pnfs_calc_offset_length(new->pls_range.offset,
330 if (test_bit(NFS_LSEG_ROC, &old->pls_flags))
331 set_bit(NFS_LSEG_ROC, &new->pls_flags);
336 ff_layout_add_lseg(struct pnfs_layout_hdr *lo,
337 struct pnfs_layout_segment *lseg,
338 struct list_head *free_me)
340 pnfs_generic_layout_insert_lseg(lo, lseg,
341 ff_lseg_range_is_after,
346 static void ff_layout_sort_mirrors(struct nfs4_ff_layout_segment *fls)
350 for (i = 0; i < fls->mirror_array_cnt - 1; i++) {
351 for (j = i + 1; j < fls->mirror_array_cnt; j++)
352 if (fls->mirror_array[i]->efficiency <
353 fls->mirror_array[j]->efficiency)
354 swap(fls->mirror_array[i],
355 fls->mirror_array[j]);
359 static struct pnfs_layout_segment *
360 ff_layout_alloc_lseg(struct pnfs_layout_hdr *lh,
361 struct nfs4_layoutget_res *lgr,
364 struct pnfs_layout_segment *ret;
365 struct nfs4_ff_layout_segment *fls = NULL;
366 struct xdr_stream stream;
368 struct page *scratch;
370 u32 mirror_array_cnt;
374 dprintk("--> %s\n", __func__);
375 scratch = alloc_page(gfp_flags);
377 return ERR_PTR(-ENOMEM);
379 xdr_init_decode_pages(&stream, &buf, lgr->layoutp->pages,
381 xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE);
383 /* stripe unit and mirror_array_cnt */
385 p = xdr_inline_decode(&stream, 8 + 4);
389 p = xdr_decode_hyper(p, &stripe_unit);
390 mirror_array_cnt = be32_to_cpup(p++);
391 dprintk("%s: stripe_unit=%llu mirror_array_cnt=%u\n", __func__,
392 stripe_unit, mirror_array_cnt);
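/* reject a zero or oversized mirror count before allocating anything */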
394 if (mirror_array_cnt > NFS4_FLEXFILE_LAYOUT_MAX_MIRROR_CNT ||
395 mirror_array_cnt == 0)
399 fls = kzalloc(sizeof(*fls), gfp_flags);
403 fls->mirror_array_cnt = mirror_array_cnt;
404 fls->stripe_unit = stripe_unit;
405 fls->mirror_array = kcalloc(fls->mirror_array_cnt,
406 sizeof(fls->mirror_array[0]), gfp_flags);
407 if (fls->mirror_array == NULL)
410 for (i = 0; i < fls->mirror_array_cnt; i++) {
411 struct nfs4_ff_layout_mirror *mirror;
413 const struct cred *cred;
416 u32 ds_count, fh_count, id;
420 p = xdr_inline_decode(&stream, 4);
423 ds_count = be32_to_cpup(p);
425 /* FIXME: allow for striping? */
429 fls->mirror_array[i] = ff_layout_alloc_mirror(gfp_flags);
430 if (fls->mirror_array[i] == NULL) {
435 fls->mirror_array[i]->ds_count = ds_count;
438 rc = decode_deviceid(&stream, &fls->mirror_array[i]->devid);
444 p = xdr_inline_decode(&stream, 4);
447 fls->mirror_array[i]->efficiency = be32_to_cpup(p);
450 rc = decode_pnfs_stateid(&stream, &fls->mirror_array[i]->stateid);
456 p = xdr_inline_decode(&stream, 4);
459 fh_count = be32_to_cpup(p);
461 fls->mirror_array[i]->fh_versions =
462 kcalloc(fh_count, sizeof(struct nfs_fh),
464 if (fls->mirror_array[i]->fh_versions == NULL) {
469 for (j = 0; j < fh_count; j++) {
470 rc = decode_nfs_fh(&stream,
471 &fls->mirror_array[i]->fh_versions[j]);
476 fls->mirror_array[i]->fh_versions_cnt = fh_count;
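/* decode the stringified user and group this mirror's DS I/O should use */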
479 rc = decode_name(&stream, &id);
483 uid = make_kuid(&init_user_ns, id);
486 rc = decode_name(&stream, &id);
490 gid = make_kgid(&init_user_ns, id);
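/* prepare_kernel_cred() allocates with GFP_KERNEL, so when the caller
 * did not pass __GFP_FS we wrap the call in memalloc_nofs_save() to keep
 * the allocation from recursing into the filesystem. */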
492 if (gfp_flags & __GFP_FS)
493 kcred = prepare_kernel_cred(NULL);
495 unsigned int nofs_flags = memalloc_nofs_save();
496 kcred = prepare_kernel_cred(NULL);
497 memalloc_nofs_restore(nofs_flags);
506 if (lgr->range.iomode == IOMODE_READ)
507 rcu_assign_pointer(fls->mirror_array[i]->ro_cred, cred);
509 rcu_assign_pointer(fls->mirror_array[i]->rw_cred, cred);
511 mirror = ff_layout_add_mirror(lh, fls->mirror_array[i]);
512 if (mirror != fls->mirror_array[i]) {
513 /* swap cred ptrs so free_mirror will clean up old */
514 if (lgr->range.iomode == IOMODE_READ) {
515 cred = xchg(&mirror->ro_cred, cred);
516 rcu_assign_pointer(fls->mirror_array[i]->ro_cred, cred);
518 cred = xchg(&mirror->rw_cred, cred);
519 rcu_assign_pointer(fls->mirror_array[i]->rw_cred, cred);
521 ff_layout_free_mirror(fls->mirror_array[i]);
522 fls->mirror_array[i] = mirror;
525 dprintk("%s: iomode %s uid %u gid %u\n", __func__,
526 lgr->range.iomode == IOMODE_READ ? "READ" : "RW",
527 from_kuid(&init_user_ns, uid),
528 from_kgid(&init_user_ns, gid));
531 p = xdr_inline_decode(&stream, 4);
533 goto out_sort_mirrors;
534 fls->flags = be32_to_cpup(p);
536 p = xdr_inline_decode(&stream, 4);
538 goto out_sort_mirrors;
539 for (i = 0; i < fls->mirror_array_cnt; i++)
540 fls->mirror_array[i]->report_interval = be32_to_cpup(p);
543 ff_layout_sort_mirrors(fls);
544 rc = ff_layout_check_layout(lgr);
547 ret = &fls->generic_hdr;
548 dprintk("<-- %s (success)\n", __func__);
550 __free_page(scratch);
553 _ff_layout_free_lseg(fls);
555 dprintk("<-- %s (%d)\n", __func__, rc);
559 static bool ff_layout_has_rw_segments(struct pnfs_layout_hdr *layout)
561 struct pnfs_layout_segment *lseg;
563 list_for_each_entry(lseg, &layout->plh_segs, pls_list)
564 if (lseg->pls_range.iomode == IOMODE_RW)
571 ff_layout_free_lseg(struct pnfs_layout_segment *lseg)
573 struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
575 dprintk("--> %s\n", __func__);
577 if (lseg->pls_range.iomode == IOMODE_RW) {
578 struct nfs4_flexfile_layout *ffl;
581 ffl = FF_LAYOUT_FROM_HDR(lseg->pls_layout);
582 inode = ffl->generic_hdr.plh_inode;
583 spin_lock(&inode->i_lock);
584 if (!ff_layout_has_rw_segments(lseg->pls_layout)) {
585 ffl->commit_info.nbuckets = 0;
586 kfree(ffl->commit_info.buckets);
587 ffl->commit_info.buckets = NULL;
589 spin_unlock(&inode->i_lock);
591 _ff_layout_free_lseg(fls);
594 /* Return 1 until we have multiple lsegs support */
596 ff_layout_get_lseg_count(struct nfs4_ff_layout_segment *fls)
602 nfs4_ff_start_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now)
604 /* first IO request? */
605 if (atomic_inc_return(&timer->n_ops) == 1) {
606 timer->start_time = now;
611 nfs4_ff_end_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now)
615 if (atomic_dec_return(&timer->n_ops) < 0)
618 start = timer->start_time;
619 timer->start_time = now;
620 return ktime_sub(now, start);
624 nfs4_ff_layoutstat_start_io(struct nfs4_ff_layout_mirror *mirror,
625 struct nfs4_ff_layoutstat *layoutstat,
628 s64 report_interval = FF_LAYOUTSTATS_REPORT_INTERVAL;
629 struct nfs4_flexfile_layout *ffl = FF_LAYOUT_FROM_HDR(mirror->layout);
631 nfs4_ff_start_busy_timer(&layoutstat->busy_timer, now);
632 if (!mirror->start_time)
633 mirror->start_time = now;
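/* a report interval from the layout takes precedence over the
 * layoutstats_timer module parameter, which in turn overrides the
 * built-in FF_LAYOUTSTATS_REPORT_INTERVAL default */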
634 if (mirror->report_interval != 0)
635 report_interval = (s64)mirror->report_interval * 1000LL;
636 else if (layoutstats_timer != 0)
637 report_interval = (s64)layoutstats_timer * 1000LL;
638 if (ktime_to_ms(ktime_sub(now, ffl->last_report_time)) >=
640 ffl->last_report_time = now;
648 nfs4_ff_layout_stat_io_update_requested(struct nfs4_ff_layoutstat *layoutstat,
651 struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat;
653 iostat->ops_requested++;
654 iostat->bytes_requested += requested;
658 nfs4_ff_layout_stat_io_update_completed(struct nfs4_ff_layoutstat *layoutstat,
661 ktime_t time_completed,
662 ktime_t time_started)
664 struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat;
665 ktime_t completion_time = ktime_sub(time_completed, time_started);
668 iostat->ops_completed++;
669 iostat->bytes_completed += completed;
670 iostat->bytes_not_delivered += requested - completed;
672 timer = nfs4_ff_end_busy_timer(&layoutstat->busy_timer, time_completed);
673 iostat->total_busy_time =
674 ktime_add(iostat->total_busy_time, timer);
675 iostat->aggregate_completion_time =
676 ktime_add(iostat->aggregate_completion_time,
681 nfs4_ff_layout_stat_io_start_read(struct inode *inode,
682 struct nfs4_ff_layout_mirror *mirror,
683 __u64 requested, ktime_t now)
687 spin_lock(&mirror->lock);
688 report = nfs4_ff_layoutstat_start_io(mirror, &mirror->read_stat, now);
689 nfs4_ff_layout_stat_io_update_requested(&mirror->read_stat, requested);
690 set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
691 spin_unlock(&mirror->lock);
694 pnfs_report_layoutstat(inode, GFP_KERNEL);
698 nfs4_ff_layout_stat_io_end_read(struct rpc_task *task,
699 struct nfs4_ff_layout_mirror *mirror,
703 spin_lock(&mirror->lock);
704 nfs4_ff_layout_stat_io_update_completed(&mirror->read_stat,
705 requested, completed,
706 ktime_get(), task->tk_start);
707 set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
708 spin_unlock(&mirror->lock);
712 nfs4_ff_layout_stat_io_start_write(struct inode *inode,
713 struct nfs4_ff_layout_mirror *mirror,
714 __u64 requested, ktime_t now)
718 spin_lock(&mirror->lock);
719 report = nfs4_ff_layoutstat_start_io(mirror, &mirror->write_stat, now);
720 nfs4_ff_layout_stat_io_update_requested(&mirror->write_stat, requested);
721 set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
722 spin_unlock(&mirror->lock);
725 pnfs_report_layoutstat(inode, GFP_NOIO);
729 nfs4_ff_layout_stat_io_end_write(struct rpc_task *task,
730 struct nfs4_ff_layout_mirror *mirror,
733 enum nfs3_stable_how committed)
735 if (committed == NFS_UNSTABLE)
736 requested = completed = 0;
738 spin_lock(&mirror->lock);
739 nfs4_ff_layout_stat_io_update_completed(&mirror->write_stat,
740 requested, completed, ktime_get(), task->tk_start);
741 set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
742 spin_unlock(&mirror->lock);
746 ff_layout_alloc_commit_info(struct pnfs_layout_segment *lseg,
747 struct nfs_commit_info *cinfo,
750 struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
751 struct pnfs_commit_bucket *buckets;
754 if (cinfo->ds->nbuckets != 0) {
755 /* This assumes there is only one RW lseg per file.
756 * To support multiple lsegs per file, we need to
757 * change struct pnfs_commit_bucket to allow dynamically
758 * increasing nbuckets.
763 size = ff_layout_get_lseg_count(fls) * FF_LAYOUT_MIRROR_COUNT(lseg);
765 buckets = kcalloc(size, sizeof(struct pnfs_commit_bucket),
772 spin_lock(&cinfo->inode->i_lock);
773 if (cinfo->ds->nbuckets != 0)
776 cinfo->ds->buckets = buckets;
777 cinfo->ds->nbuckets = size;
778 for (i = 0; i < size; i++) {
779 INIT_LIST_HEAD(&buckets[i].written);
780 INIT_LIST_HEAD(&buckets[i].committing);
781 /* mark direct verifier as unset */
782 buckets[i].direct_verf.committed =
783 NFS_INVALID_STABLE_HOW;
786 spin_unlock(&cinfo->inode->i_lock);
791 static struct nfs4_pnfs_ds *
792 ff_layout_choose_best_ds_for_read(struct pnfs_layout_segment *lseg,
796 struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
797 struct nfs4_pnfs_ds *ds;
798 bool fail_return = false;
801 /* mirrors are sorted by efficiency */
802 for (idx = start_idx; idx < fls->mirror_array_cnt; idx++) {
803 if (idx+1 == fls->mirror_array_cnt)
805 ds = nfs4_ff_layout_prepare_ds(lseg, idx, fail_return);
816 ff_layout_pg_get_read(struct nfs_pageio_descriptor *pgio,
817 struct nfs_page *req,
820 pnfs_put_lseg(pgio->pg_lseg);
821 pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
828 if (IS_ERR(pgio->pg_lseg)) {
829 pgio->pg_error = PTR_ERR(pgio->pg_lseg);
830 pgio->pg_lseg = NULL;
835 ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio,
836 struct nfs_page *req)
838 struct nfs_pgio_mirror *pgm;
839 struct nfs4_ff_layout_mirror *mirror;
840 struct nfs4_pnfs_ds *ds;
844 pnfs_generic_pg_check_layout(pgio);
845 /* Use full layout for now */
846 if (!pgio->pg_lseg) {
847 ff_layout_pg_get_read(pgio, req, false);
851 if (ff_layout_avoid_read_on_rw(pgio->pg_lseg)) {
852 ff_layout_pg_get_read(pgio, req, true);
857 ds = ff_layout_choose_best_ds_for_read(pgio->pg_lseg, 0, &ds_idx);
859 if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
861 pnfs_put_lseg(pgio->pg_lseg);
862 pgio->pg_lseg = NULL;
863 /* Sleep for 1 second before retrying */
868 mirror = FF_LAYOUT_COMP(pgio->pg_lseg, ds_idx);
870 pgio->pg_mirror_idx = ds_idx;
872 /* read always uses only one mirror - idx 0 for pgio layer */
873 pgm = &pgio->pg_mirrors[0];
874 pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].rsize;
878 if (pgio->pg_error < 0)
881 pnfs_put_lseg(pgio->pg_lseg);
882 pgio->pg_lseg = NULL;
883 nfs_pageio_reset_read_mds(pgio);
887 ff_layout_pg_init_write(struct nfs_pageio_descriptor *pgio,
888 struct nfs_page *req)
890 struct nfs4_ff_layout_mirror *mirror;
891 struct nfs_pgio_mirror *pgm;
892 struct nfs_commit_info cinfo;
893 struct nfs4_pnfs_ds *ds;
898 pnfs_generic_pg_check_layout(pgio);
899 if (!pgio->pg_lseg) {
900 pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
907 if (IS_ERR(pgio->pg_lseg)) {
908 pgio->pg_error = PTR_ERR(pgio->pg_lseg);
909 pgio->pg_lseg = NULL;
913 /* If no lseg, fall back to write through mds */
914 if (pgio->pg_lseg == NULL)
917 nfs_init_cinfo(&cinfo, pgio->pg_inode, pgio->pg_dreq);
918 status = ff_layout_alloc_commit_info(pgio->pg_lseg, &cinfo, GFP_NOFS);
922 /* Use a direct mapping of ds_idx to pgio mirror_idx */
923 if (WARN_ON_ONCE(pgio->pg_mirror_count !=
924 FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg)))
927 for (i = 0; i < pgio->pg_mirror_count; i++) {
928 ds = nfs4_ff_layout_prepare_ds(pgio->pg_lseg, i, true);
930 if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
932 pnfs_put_lseg(pgio->pg_lseg);
933 pgio->pg_lseg = NULL;
934 /* Sleep for 1 second before retrying */
938 pgm = &pgio->pg_mirrors[i];
939 mirror = FF_LAYOUT_COMP(pgio->pg_lseg, i);
940 pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].wsize;
946 pnfs_put_lseg(pgio->pg_lseg);
947 pgio->pg_lseg = NULL;
948 nfs_pageio_reset_write_mds(pgio);
952 ff_layout_pg_get_mirror_count_write(struct nfs_pageio_descriptor *pgio,
953 struct nfs_page *req)
955 if (!pgio->pg_lseg) {
956 pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
963 if (IS_ERR(pgio->pg_lseg)) {
964 pgio->pg_error = PTR_ERR(pgio->pg_lseg);
965 pgio->pg_lseg = NULL;
970 return FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg);
972 /* no lseg means that pnfs is not in use, so no mirroring here */
973 nfs_pageio_reset_write_mds(pgio);
978 static const struct nfs_pageio_ops ff_layout_pg_read_ops = {
979 .pg_init = ff_layout_pg_init_read,
980 .pg_test = pnfs_generic_pg_test,
981 .pg_doio = pnfs_generic_pg_readpages,
982 .pg_cleanup = pnfs_generic_pg_cleanup,
985 static const struct nfs_pageio_ops ff_layout_pg_write_ops = {
986 .pg_init = ff_layout_pg_init_write,
987 .pg_test = pnfs_generic_pg_test,
988 .pg_doio = pnfs_generic_pg_writepages,
989 .pg_get_mirror_count = ff_layout_pg_get_mirror_count_write,
990 .pg_cleanup = pnfs_generic_pg_cleanup,
993 static void ff_layout_reset_write(struct nfs_pgio_header *hdr, bool retry_pnfs)
995 struct rpc_task *task = &hdr->task;
997 pnfs_layoutcommit_inode(hdr->inode, false);
1000 dprintk("%s Reset task %5u for i/o through pNFS "
1001 "(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
1003 hdr->inode->i_sb->s_id,
1004 (unsigned long long)NFS_FILEID(hdr->inode),
1006 (unsigned long long)hdr->args.offset);
1008 hdr->completion_ops->reschedule_io(hdr);
1012 if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
1013 dprintk("%s Reset task %5u for i/o through MDS "
1014 "(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
1016 hdr->inode->i_sb->s_id,
1017 (unsigned long long)NFS_FILEID(hdr->inode),
1019 (unsigned long long)hdr->args.offset);
1021 task->tk_status = pnfs_write_done_resend_to_mds(hdr);
1025 static void ff_layout_reset_read(struct nfs_pgio_header *hdr)
1027 struct rpc_task *task = &hdr->task;
1029 pnfs_layoutcommit_inode(hdr->inode, false);
1031 if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
1032 dprintk("%s Reset task %5u for i/o through MDS "
1033 "(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
1035 hdr->inode->i_sb->s_id,
1036 (unsigned long long)NFS_FILEID(hdr->inode),
1038 (unsigned long long)hdr->args.offset);
1040 task->tk_status = pnfs_read_done_resend_to_mds(hdr);
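/*
 * Map an error returned by an NFSv4.x DS onto a recovery action: trigger
 * session or state recovery, delete the failing deviceid, or simply retry,
 * and tell the caller whether to resend the I/O through pNFS
 * (-NFS4ERR_RESET_TO_PNFS) or through the MDS (-NFS4ERR_RESET_TO_MDS).
 */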
1044 static int ff_layout_async_handle_error_v4(struct rpc_task *task,
1045 struct nfs4_state *state,
1046 struct nfs_client *clp,
1047 struct pnfs_layout_segment *lseg,
1050 struct pnfs_layout_hdr *lo = lseg->pls_layout;
1051 struct inode *inode = lo->plh_inode;
1052 struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
1053 struct nfs4_slot_table *tbl = &clp->cl_session->fc_slot_table;
1055 switch (task->tk_status) {
1056 case -NFS4ERR_BADSESSION:
1057 case -NFS4ERR_BADSLOT:
1058 case -NFS4ERR_BAD_HIGH_SLOT:
1059 case -NFS4ERR_DEADSESSION:
1060 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
1061 case -NFS4ERR_SEQ_FALSE_RETRY:
1062 case -NFS4ERR_SEQ_MISORDERED:
1063 dprintk("%s ERROR %d, Reset session. Exchangeid "
1064 "flags 0x%x\n", __func__, task->tk_status,
1065 clp->cl_exchange_flags);
1066 nfs4_schedule_session_recovery(clp->cl_session, task->tk_status);
1068 case -NFS4ERR_DELAY:
1069 case -NFS4ERR_GRACE:
1070 rpc_delay(task, FF_LAYOUT_POLL_RETRY_MAX);
1072 case -NFS4ERR_RETRY_UNCACHED_REP:
1074 /* Invalidate Layout errors */
1075 case -NFS4ERR_PNFS_NO_LAYOUT:
1076 case -ESTALE: /* mapped NFS4ERR_STALE */
1077 case -EBADHANDLE: /* mapped NFS4ERR_BADHANDLE */
1078 case -EISDIR: /* mapped NFS4ERR_ISDIR */
1079 case -NFS4ERR_FHEXPIRED:
1080 case -NFS4ERR_WRONG_TYPE:
1081 dprintk("%s Invalid layout error %d\n", __func__,
1084 * Destroy layout so new i/o will get a new layout.
1085 * Layout will not be destroyed until all current lseg
1086 * references are put. Mark layout as invalid to resend failed
1087 * i/o and all i/o waiting on the slot table to the MDS until
1088 * layout is destroyed and a new valid layout is obtained.
1090 pnfs_destroy_layout(NFS_I(inode));
1091 rpc_wake_up(&tbl->slot_tbl_waitq);
1093 /* RPC connection errors */
1101 dprintk("%s DS connection error %d\n", __func__,
1103 nfs4_delete_deviceid(devid->ld, devid->nfs_client,
1105 rpc_wake_up(&tbl->slot_tbl_waitq);
1108 if (ff_layout_avoid_mds_available_ds(lseg))
1109 return -NFS4ERR_RESET_TO_PNFS;
1111 dprintk("%s Retry through MDS. Error %d\n", __func__,
1113 return -NFS4ERR_RESET_TO_MDS;
1115 task->tk_status = 0;
1119 /* Retry all errors through either pNFS or MDS except for -EJUKEBOX */
1120 static int ff_layout_async_handle_error_v3(struct rpc_task *task,
1121 struct pnfs_layout_segment *lseg,
1124 struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
1126 switch (task->tk_status) {
1127 /* File access problems. Don't mark the device as unavailable */
1136 nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
1139 dprintk("%s DS connection error %d\n", __func__,
1141 nfs4_delete_deviceid(devid->ld, devid->nfs_client,
1144 /* FIXME: Need to prevent infinite looping here. */
1145 return -NFS4ERR_RESET_TO_PNFS;
1147 task->tk_status = 0;
1148 rpc_restart_call_prepare(task);
1149 rpc_delay(task, NFS_JUKEBOX_RETRY_TIME);
1153 static int ff_layout_async_handle_error(struct rpc_task *task,
1154 struct nfs4_state *state,
1155 struct nfs_client *clp,
1156 struct pnfs_layout_segment *lseg,
1159 int vers = clp->cl_nfs_mod->rpc_vers->number;
1161 if (task->tk_status >= 0)
1164 /* Handle the case of an invalid layout segment */
1165 if (!pnfs_is_valid_lseg(lseg))
1166 return -NFS4ERR_RESET_TO_PNFS;
1170 return ff_layout_async_handle_error_v3(task, lseg, idx);
1172 return ff_layout_async_handle_error_v4(task, state, clp,
1175 /* should never happen */
1181 static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg,
1182 int idx, u64 offset, u64 length,
1183 u32 status, int opnum, int error)
1185 struct nfs4_ff_layout_mirror *mirror;
1192 case -EPROTONOSUPPORT:
1203 status = NFS4ERR_NXIO;
1206 status = NFS4ERR_ACCESS;
1221 mirror = FF_LAYOUT_COMP(lseg, idx);
1222 err = ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout),
1223 mirror, offset, length, status, opnum,
1225 pnfs_error_mark_layout_for_return(lseg->pls_layout->plh_inode, lseg);
1226 dprintk("%s: err %d op %d status %u\n", __func__, err, opnum, status);
1229 /* NFS_PROTO call done callback routines */
1230 static int ff_layout_read_done_cb(struct rpc_task *task,
1231 struct nfs_pgio_header *hdr)
1235 trace_nfs4_pnfs_read(hdr, task->tk_status);
1236 if (task->tk_status < 0)
1237 ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
1238 hdr->args.offset, hdr->args.count,
1239 hdr->res.op_status, OP_READ,
1241 err = ff_layout_async_handle_error(task, hdr->args.context->state,
1242 hdr->ds_clp, hdr->lseg,
1243 hdr->pgio_mirror_idx);
1245 clear_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
1246 clear_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
1248 case -NFS4ERR_RESET_TO_PNFS:
1249 if (ff_layout_choose_best_ds_for_read(hdr->lseg,
1250 hdr->pgio_mirror_idx + 1,
1251 &hdr->pgio_mirror_idx))
1253 set_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
1254 return task->tk_status;
1255 case -NFS4ERR_RESET_TO_MDS:
1256 set_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
1257 return task->tk_status;
1264 rpc_restart_call_prepare(task);
1269 ff_layout_need_layoutcommit(struct pnfs_layout_segment *lseg)
1271 return !(FF_LAYOUT_LSEG(lseg)->flags & FF_FLAGS_NO_LAYOUTCOMMIT);
1275 * We reference the rpc_cred of the first WRITE that triggers the need for
1276 * a LAYOUTCOMMIT, and use it to send the layoutcommit compound.
1277 * RFC 5661 is not clear about which credential should be used.
1279 * The flexfile layout client should treat a FILE_SYNC reply from the DS as
1280 * DATA_SYNC; per http://www.rfc-editor.org/errata_search.php?rfc=5661&eid=2751
1281 * we always send a layoutcommit after DS writes.
1284 ff_layout_set_layoutcommit(struct inode *inode,
1285 struct pnfs_layout_segment *lseg,
1288 if (!ff_layout_need_layoutcommit(lseg))
1291 pnfs_set_layoutcommit(inode, lseg, end_offset);
1292 dprintk("%s inode %lu pls_end_pos %llu\n", __func__, inode->i_ino,
1293 (unsigned long long) NFS_I(inode)->layout->plh_lwb);
1297 ff_layout_device_unavailable(struct pnfs_layout_segment *lseg, int idx)
1299 /* No mirroring for now */
1300 struct nfs4_deviceid_node *node = FF_LAYOUT_DEVID_NODE(lseg, idx);
1302 return ff_layout_test_devid_unavailable(node);
1305 static void ff_layout_read_record_layoutstats_start(struct rpc_task *task,
1306 struct nfs_pgio_header *hdr)
1308 if (test_and_set_bit(NFS_IOHDR_STAT, &hdr->flags))
1310 nfs4_ff_layout_stat_io_start_read(hdr->inode,
1311 FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
1316 static void ff_layout_read_record_layoutstats_done(struct rpc_task *task,
1317 struct nfs_pgio_header *hdr)
1319 if (!test_and_clear_bit(NFS_IOHDR_STAT, &hdr->flags))
1321 nfs4_ff_layout_stat_io_end_read(task,
1322 FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
1325 set_bit(NFS_LSEG_LAYOUTRETURN, &hdr->lseg->pls_flags);
1328 static int ff_layout_read_prepare_common(struct rpc_task *task,
1329 struct nfs_pgio_header *hdr)
1331 if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
1332 rpc_exit(task, -EIO);
1335 if (ff_layout_device_unavailable(hdr->lseg, hdr->pgio_mirror_idx)) {
1336 rpc_exit(task, -EHOSTDOWN);
1340 ff_layout_read_record_layoutstats_start(task, hdr);
1345 * Call ops for the async read/write cases
1346 * In the case of dense layouts, the offset needs to be reset to its
1349 static void ff_layout_read_prepare_v3(struct rpc_task *task, void *data)
1351 struct nfs_pgio_header *hdr = data;
1353 if (ff_layout_read_prepare_common(task, hdr))
1356 rpc_call_start(task);
1359 static void ff_layout_read_prepare_v4(struct rpc_task *task, void *data)
1361 struct nfs_pgio_header *hdr = data;
1363 if (nfs4_setup_sequence(hdr->ds_clp,
1364 &hdr->args.seq_args,
1369 ff_layout_read_prepare_common(task, hdr);
1372 static void ff_layout_read_call_done(struct rpc_task *task, void *data)
1374 struct nfs_pgio_header *hdr = data;
1376 dprintk("--> %s task->tk_status %d\n", __func__, task->tk_status);
1378 if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
1379 task->tk_status == 0) {
1380 nfs4_sequence_done(task, &hdr->res.seq_res);
1384 /* Note this may cause RPC to be resent */
1385 hdr->mds_ops->rpc_call_done(task, hdr);
1388 static void ff_layout_read_count_stats(struct rpc_task *task, void *data)
1390 struct nfs_pgio_header *hdr = data;
1392 ff_layout_read_record_layoutstats_done(task, hdr);
1393 rpc_count_iostats_metrics(task,
1394 &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_READ]);
1397 static void ff_layout_read_release(void *data)
1399 struct nfs_pgio_header *hdr = data;
1401 ff_layout_read_record_layoutstats_done(&hdr->task, hdr);
1402 if (test_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags))
1403 pnfs_read_resend_pnfs(hdr);
1404 else if (test_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags))
1405 ff_layout_reset_read(hdr);
1406 pnfs_generic_rw_release(data);
1410 static int ff_layout_write_done_cb(struct rpc_task *task,
1411 struct nfs_pgio_header *hdr)
1413 loff_t end_offs = 0;
1416 trace_nfs4_pnfs_write(hdr, task->tk_status);
1417 if (task->tk_status < 0)
1418 ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
1419 hdr->args.offset, hdr->args.count,
1420 hdr->res.op_status, OP_WRITE,
1422 err = ff_layout_async_handle_error(task, hdr->args.context->state,
1423 hdr->ds_clp, hdr->lseg,
1424 hdr->pgio_mirror_idx);
1426 clear_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
1427 clear_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
1429 case -NFS4ERR_RESET_TO_PNFS:
1430 set_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
1431 return task->tk_status;
1432 case -NFS4ERR_RESET_TO_MDS:
1433 set_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
1434 return task->tk_status;
1439 if (hdr->res.verf->committed == NFS_FILE_SYNC ||
1440 hdr->res.verf->committed == NFS_DATA_SYNC)
1441 end_offs = hdr->mds_offset + (loff_t)hdr->res.count;
1443 /* Note: if the write is unstable, don't set end_offs until commit */
1444 ff_layout_set_layoutcommit(hdr->inode, hdr->lseg, end_offs);
1446 /* zero out fattr since we don't care about DS attrs at all */
1447 hdr->fattr.valid = 0;
1448 if (task->tk_status >= 0)
1449 nfs_writeback_update_inode(hdr);
1454 static int ff_layout_commit_done_cb(struct rpc_task *task,
1455 struct nfs_commit_data *data)
1459 trace_nfs4_pnfs_commit_ds(data, task->tk_status);
1460 if (task->tk_status < 0)
1461 ff_layout_io_track_ds_error(data->lseg, data->ds_commit_index,
1462 data->args.offset, data->args.count,
1463 data->res.op_status, OP_COMMIT,
1465 err = ff_layout_async_handle_error(task, NULL, data->ds_clp,
1466 data->lseg, data->ds_commit_index);
1469 case -NFS4ERR_RESET_TO_PNFS:
1470 pnfs_generic_prepare_to_resend_writes(data);
1472 case -NFS4ERR_RESET_TO_MDS:
1473 pnfs_generic_prepare_to_resend_writes(data);
1476 rpc_restart_call_prepare(task);
1480 ff_layout_set_layoutcommit(data->inode, data->lseg, data->lwb);
1485 static void ff_layout_write_record_layoutstats_start(struct rpc_task *task,
1486 struct nfs_pgio_header *hdr)
1488 if (test_and_set_bit(NFS_IOHDR_STAT, &hdr->flags))
1490 nfs4_ff_layout_stat_io_start_write(hdr->inode,
1491 FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
1496 static void ff_layout_write_record_layoutstats_done(struct rpc_task *task,
1497 struct nfs_pgio_header *hdr)
1499 if (!test_and_clear_bit(NFS_IOHDR_STAT, &hdr->flags))
1501 nfs4_ff_layout_stat_io_end_write(task,
1502 FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
1503 hdr->args.count, hdr->res.count,
1504 hdr->res.verf->committed);
1505 set_bit(NFS_LSEG_LAYOUTRETURN, &hdr->lseg->pls_flags);
1508 static int ff_layout_write_prepare_common(struct rpc_task *task,
1509 struct nfs_pgio_header *hdr)
1511 if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
1512 rpc_exit(task, -EIO);
1516 if (ff_layout_device_unavailable(hdr->lseg, hdr->pgio_mirror_idx)) {
1517 rpc_exit(task, -EHOSTDOWN);
1521 ff_layout_write_record_layoutstats_start(task, hdr);
1525 static void ff_layout_write_prepare_v3(struct rpc_task *task, void *data)
1527 struct nfs_pgio_header *hdr = data;
1529 if (ff_layout_write_prepare_common(task, hdr))
1532 rpc_call_start(task);
1535 static void ff_layout_write_prepare_v4(struct rpc_task *task, void *data)
1537 struct nfs_pgio_header *hdr = data;
1539 if (nfs4_setup_sequence(hdr->ds_clp,
1540 &hdr->args.seq_args,
1545 ff_layout_write_prepare_common(task, hdr);
1548 static void ff_layout_write_call_done(struct rpc_task *task, void *data)
1550 struct nfs_pgio_header *hdr = data;
1552 if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
1553 task->tk_status == 0) {
1554 nfs4_sequence_done(task, &hdr->res.seq_res);
1558 /* Note this may cause RPC to be resent */
1559 hdr->mds_ops->rpc_call_done(task, hdr);
1562 static void ff_layout_write_count_stats(struct rpc_task *task, void *data)
1564 struct nfs_pgio_header *hdr = data;
1566 ff_layout_write_record_layoutstats_done(task, hdr);
1567 rpc_count_iostats_metrics(task,
1568 &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_WRITE]);
1571 static void ff_layout_write_release(void *data)
1573 struct nfs_pgio_header *hdr = data;
1575 ff_layout_write_record_layoutstats_done(&hdr->task, hdr);
1576 if (test_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags))
1577 ff_layout_reset_write(hdr, true);
1578 else if (test_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags))
1579 ff_layout_reset_write(hdr, false);
1580 pnfs_generic_rw_release(data);
1583 static void ff_layout_commit_record_layoutstats_start(struct rpc_task *task,
1584 struct nfs_commit_data *cdata)
1586 if (test_and_set_bit(NFS_IOHDR_STAT, &cdata->flags))
1588 nfs4_ff_layout_stat_io_start_write(cdata->inode,
1589 FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index),
1593 static void ff_layout_commit_record_layoutstats_done(struct rpc_task *task,
1594 struct nfs_commit_data *cdata)
1596 struct nfs_page *req;
1599 if (!test_and_clear_bit(NFS_IOHDR_STAT, &cdata->flags))
1602 if (task->tk_status == 0) {
1603 list_for_each_entry(req, &cdata->pages, wb_list)
1604 count += req->wb_bytes;
1606 nfs4_ff_layout_stat_io_end_write(task,
1607 FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index),
1608 count, count, NFS_FILE_SYNC);
1609 set_bit(NFS_LSEG_LAYOUTRETURN, &cdata->lseg->pls_flags);
1612 static void ff_layout_commit_prepare_common(struct rpc_task *task,
1613 struct nfs_commit_data *cdata)
1615 ff_layout_commit_record_layoutstats_start(task, cdata);
1618 static void ff_layout_commit_prepare_v3(struct rpc_task *task, void *data)
1620 ff_layout_commit_prepare_common(task, data);
1621 rpc_call_start(task);
1624 static void ff_layout_commit_prepare_v4(struct rpc_task *task, void *data)
1626 struct nfs_commit_data *wdata = data;
1628 if (nfs4_setup_sequence(wdata->ds_clp,
1629 &wdata->args.seq_args,
1630 &wdata->res.seq_res,
1633 ff_layout_commit_prepare_common(task, data);
1636 static void ff_layout_commit_done(struct rpc_task *task, void *data)
1638 pnfs_generic_write_commit_done(task, data);
1641 static void ff_layout_commit_count_stats(struct rpc_task *task, void *data)
1643 struct nfs_commit_data *cdata = data;
1645 ff_layout_commit_record_layoutstats_done(task, cdata);
1646 rpc_count_iostats_metrics(task,
1647 &NFS_CLIENT(cdata->inode)->cl_metrics[NFSPROC4_CLNT_COMMIT]);
1650 static void ff_layout_commit_release(void *data)
1652 struct nfs_commit_data *cdata = data;
1654 ff_layout_commit_record_layoutstats_done(&cdata->task, cdata);
1655 pnfs_generic_commit_release(data);
1658 static const struct rpc_call_ops ff_layout_read_call_ops_v3 = {
1659 .rpc_call_prepare = ff_layout_read_prepare_v3,
1660 .rpc_call_done = ff_layout_read_call_done,
1661 .rpc_count_stats = ff_layout_read_count_stats,
1662 .rpc_release = ff_layout_read_release,
1665 static const struct rpc_call_ops ff_layout_read_call_ops_v4 = {
1666 .rpc_call_prepare = ff_layout_read_prepare_v4,
1667 .rpc_call_done = ff_layout_read_call_done,
1668 .rpc_count_stats = ff_layout_read_count_stats,
1669 .rpc_release = ff_layout_read_release,
1672 static const struct rpc_call_ops ff_layout_write_call_ops_v3 = {
1673 .rpc_call_prepare = ff_layout_write_prepare_v3,
1674 .rpc_call_done = ff_layout_write_call_done,
1675 .rpc_count_stats = ff_layout_write_count_stats,
1676 .rpc_release = ff_layout_write_release,
1679 static const struct rpc_call_ops ff_layout_write_call_ops_v4 = {
1680 .rpc_call_prepare = ff_layout_write_prepare_v4,
1681 .rpc_call_done = ff_layout_write_call_done,
1682 .rpc_count_stats = ff_layout_write_count_stats,
1683 .rpc_release = ff_layout_write_release,
1686 static const struct rpc_call_ops ff_layout_commit_call_ops_v3 = {
1687 .rpc_call_prepare = ff_layout_commit_prepare_v3,
1688 .rpc_call_done = ff_layout_commit_done,
1689 .rpc_count_stats = ff_layout_commit_count_stats,
1690 .rpc_release = ff_layout_commit_release,
1693 static const struct rpc_call_ops ff_layout_commit_call_ops_v4 = {
1694 .rpc_call_prepare = ff_layout_commit_prepare_v4,
1695 .rpc_call_done = ff_layout_commit_done,
1696 .rpc_count_stats = ff_layout_commit_count_stats,
1697 .rpc_release = ff_layout_commit_release,
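/*
 * Issue the READ to the DS of the mirror selected by the pgio layer, using
 * the DS rpc client and DS credential; if the DS cannot be set up, fall
 * back to retrying through pNFS or through the MDS as appropriate.
 */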
1700 static enum pnfs_try_status
1701 ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
1703 struct pnfs_layout_segment *lseg = hdr->lseg;
1704 struct nfs4_pnfs_ds *ds;
1705 struct rpc_clnt *ds_clnt;
1706 const struct cred *ds_cred;
1707 loff_t offset = hdr->args.offset;
1708 u32 idx = hdr->pgio_mirror_idx;
1712 dprintk("--> %s ino %lu pgbase %u req %zu@%llu\n",
1713 __func__, hdr->inode->i_ino,
1714 hdr->args.pgbase, (size_t)hdr->args.count, offset);
1716 ds = nfs4_ff_layout_prepare_ds(lseg, idx, false);
1720 ds_clnt = nfs4_ff_find_or_create_ds_client(lseg, idx, ds->ds_clp,
1722 if (IS_ERR(ds_clnt))
1725 ds_cred = ff_layout_get_ds_cred(lseg, idx, hdr->cred);
1729 vers = nfs4_ff_layout_ds_version(lseg, idx);
1731 dprintk("%s USE DS: %s cl_count %d vers %d\n", __func__,
1732 ds->ds_remotestr, refcount_read(&ds->ds_clp->cl_count), vers);
1734 hdr->pgio_done_cb = ff_layout_read_done_cb;
1735 refcount_inc(&ds->ds_clp->cl_count);
1736 hdr->ds_clp = ds->ds_clp;
1737 fh = nfs4_ff_layout_select_ds_fh(lseg, idx);
1742 !nfs4_ff_layout_select_ds_stateid(lseg, idx, &hdr->args.stateid))
1746 * Note that if we ever decide to split across DSes,
1747 * then we may need to handle dense-like offsets.
1749 hdr->args.offset = offset;
1750 hdr->mds_offset = offset;
1752 /* Perform an asynchronous read to ds */
1753 nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
1754 vers == 3 ? &ff_layout_read_call_ops_v3 :
1755 &ff_layout_read_call_ops_v4,
1756 0, RPC_TASK_SOFTCONN);
1758 return PNFS_ATTEMPTED;
1761 if (ff_layout_avoid_mds_available_ds(lseg))
1762 return PNFS_TRY_AGAIN;
1763 return PNFS_NOT_ATTEMPTED;
1766 /* Perform async writes. */
1767 static enum pnfs_try_status
1768 ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
1770 struct pnfs_layout_segment *lseg = hdr->lseg;
1771 struct nfs4_pnfs_ds *ds;
1772 struct rpc_clnt *ds_clnt;
1773 const struct cred *ds_cred;
1774 loff_t offset = hdr->args.offset;
1777 int idx = hdr->pgio_mirror_idx;
1779 ds = nfs4_ff_layout_prepare_ds(lseg, idx, true);
1783 ds_clnt = nfs4_ff_find_or_create_ds_client(lseg, idx, ds->ds_clp,
1785 if (IS_ERR(ds_clnt))
1788 ds_cred = ff_layout_get_ds_cred(lseg, idx, hdr->cred);
1792 vers = nfs4_ff_layout_ds_version(lseg, idx);
1794 dprintk("%s ino %lu sync %d req %zu@%llu DS: %s cl_count %d vers %d\n",
1795 __func__, hdr->inode->i_ino, sync, (size_t) hdr->args.count,
1796 offset, ds->ds_remotestr, refcount_read(&ds->ds_clp->cl_count),
1799 hdr->pgio_done_cb = ff_layout_write_done_cb;
1800 refcount_inc(&ds->ds_clp->cl_count);
1801 hdr->ds_clp = ds->ds_clp;
1802 hdr->ds_commit_idx = idx;
1803 fh = nfs4_ff_layout_select_ds_fh(lseg, idx);
1808 !nfs4_ff_layout_select_ds_stateid(lseg, idx, &hdr->args.stateid))
1812 * Note that if we ever decide to split across DSes,
1813 * then we may need to handle dense-like offsets.
1815 hdr->args.offset = offset;
1817 /* Perform an asynchronous write */
1818 nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
1819 vers == 3 ? &ff_layout_write_call_ops_v3 :
1820 &ff_layout_write_call_ops_v4,
1821 sync, RPC_TASK_SOFTCONN);
1823 return PNFS_ATTEMPTED;
1826 if (ff_layout_avoid_mds_available_ds(lseg))
1827 return PNFS_TRY_AGAIN;
1828 return PNFS_NOT_ATTEMPTED;
1831 static u32 calc_ds_index_from_commit(struct pnfs_layout_segment *lseg, u32 i)
1836 static struct nfs_fh *
1837 select_ds_fh_from_commit(struct pnfs_layout_segment *lseg, u32 i)
1839 struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg);
1841 /* FIXME: Assume that there is only one NFS version available
1844 return &flseg->mirror_array[i]->fh_versions[0];
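/*
 * Send COMMIT to the DS backing the given commit bucket; if the DS, its
 * rpc client or its credential cannot be set up, resend the writes
 * through the MDS instead.
 */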
1847 static int ff_layout_initiate_commit(struct nfs_commit_data *data, int how)
1849 struct pnfs_layout_segment *lseg = data->lseg;
1850 struct nfs4_pnfs_ds *ds;
1851 struct rpc_clnt *ds_clnt;
1852 const struct cred *ds_cred;
1857 if (!lseg || !(pnfs_is_valid_lseg(lseg) ||
1858 test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags)))
1861 idx = calc_ds_index_from_commit(lseg, data->ds_commit_index);
1862 ds = nfs4_ff_layout_prepare_ds(lseg, idx, true);
1866 ds_clnt = nfs4_ff_find_or_create_ds_client(lseg, idx, ds->ds_clp,
1868 if (IS_ERR(ds_clnt))
1871 ds_cred = ff_layout_get_ds_cred(lseg, idx, data->cred);
1875 vers = nfs4_ff_layout_ds_version(lseg, idx);
1877 dprintk("%s ino %lu, how %d cl_count %d vers %d\n", __func__,
1878 data->inode->i_ino, how, refcount_read(&ds->ds_clp->cl_count),
1880 data->commit_done_cb = ff_layout_commit_done_cb;
1881 data->cred = ds_cred;
1882 refcount_inc(&ds->ds_clp->cl_count);
1883 data->ds_clp = ds->ds_clp;
1884 fh = select_ds_fh_from_commit(lseg, data->ds_commit_index);
1888 ret = nfs_initiate_commit(ds_clnt, data, ds->ds_clp->rpc_ops,
1889 vers == 3 ? &ff_layout_commit_call_ops_v3 :
1890 &ff_layout_commit_call_ops_v4,
1891 how, RPC_TASK_SOFTCONN);
1895 pnfs_generic_prepare_to_resend_writes(data);
1896 pnfs_generic_commit_release(data);
1901 ff_layout_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
1902 int how, struct nfs_commit_info *cinfo)
1904 return pnfs_generic_commit_pagelist(inode, mds_pages, how, cinfo,
1905 ff_layout_initiate_commit);
1908 static struct pnfs_ds_commit_info *
1909 ff_layout_get_ds_info(struct inode *inode)
1911 struct pnfs_layout_hdr *layout = NFS_I(inode)->layout;
1916 return &FF_LAYOUT_FROM_HDR(layout)->commit_info;
1920 ff_layout_free_deviceid_node(struct nfs4_deviceid_node *d)
1922 nfs4_ff_layout_free_deviceid(container_of(d, struct nfs4_ff_layout_ds,
1926 static int ff_layout_encode_ioerr(struct xdr_stream *xdr,
1927 const struct nfs4_layoutreturn_args *args,
1928 const struct nfs4_flexfile_layoutreturn_args *ff_args)
1932 start = xdr_reserve_space(xdr, 4);
1933 if (unlikely(!start))
1936 *start = cpu_to_be32(ff_args->num_errors);
1937 /* This assumes we always return _ALL_ layouts */
1938 return ff_layout_encode_ds_ioerr(xdr, &ff_args->errors);
1942 encode_opaque_fixed(struct xdr_stream *xdr, const void *buf, size_t len)
1944 WARN_ON_ONCE(xdr_stream_encode_opaque_fixed(xdr, buf, len) < 0);
1948 ff_layout_encode_ff_iostat_head(struct xdr_stream *xdr,
1949 const nfs4_stateid *stateid,
1950 const struct nfs42_layoutstat_devinfo *devinfo)
1954 p = xdr_reserve_space(xdr, 8 + 8);
1955 p = xdr_encode_hyper(p, devinfo->offset);
1956 p = xdr_encode_hyper(p, devinfo->length);
1957 encode_opaque_fixed(xdr, stateid->data, NFS4_STATEID_SIZE);
1958 p = xdr_reserve_space(xdr, 4*8);
1959 p = xdr_encode_hyper(p, devinfo->read_count);
1960 p = xdr_encode_hyper(p, devinfo->read_bytes);
1961 p = xdr_encode_hyper(p, devinfo->write_count);
1962 p = xdr_encode_hyper(p, devinfo->write_bytes);
1963 encode_opaque_fixed(xdr, devinfo->dev_id.data, NFS4_DEVICEID4_SIZE);
1967 ff_layout_encode_ff_iostat(struct xdr_stream *xdr,
1968 const nfs4_stateid *stateid,
1969 const struct nfs42_layoutstat_devinfo *devinfo)
1971 ff_layout_encode_ff_iostat_head(xdr, stateid, devinfo);
1972 ff_layout_encode_ff_layoutupdate(xdr, devinfo,
1973 devinfo->ld_private.data);
1976 /* encode the per-device iostats gathered for this layoutreturn */
1977 static void ff_layout_encode_iostats_array(struct xdr_stream *xdr,
1978 const struct nfs4_layoutreturn_args *args,
1979 struct nfs4_flexfile_layoutreturn_args *ff_args)
1984 p = xdr_reserve_space(xdr, 4);
1985 *p = cpu_to_be32(ff_args->num_dev);
1986 for (i = 0; i < ff_args->num_dev; i++)
1987 ff_layout_encode_ff_iostat(xdr,
1988 &args->layout->plh_stateid,
1989 &ff_args->devinfo[i]);
1993 ff_layout_free_iostats_array(struct nfs42_layoutstat_devinfo *devinfo,
1994 unsigned int num_entries)
1998 for (i = 0; i < num_entries; i++) {
1999 if (!devinfo[i].ld_private.ops)
2001 if (!devinfo[i].ld_private.ops->free)
2003 devinfo[i].ld_private.ops->free(&devinfo[i].ld_private);
2007 static struct nfs4_deviceid_node *
2008 ff_layout_alloc_deviceid_node(struct nfs_server *server,
2009 struct pnfs_device *pdev, gfp_t gfp_flags)
2011 struct nfs4_ff_layout_ds *dsaddr;
2013 dsaddr = nfs4_ff_alloc_deviceid_node(server, pdev, gfp_flags);
2016 return &dsaddr->id_node;
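/*
 * Encode the flexfiles layoutreturn payload (the DS I/O error list followed
 * by the per-mirror iostats array) into a scratch page first, then emit it
 * as a single length-prefixed blob.
 */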
2020 ff_layout_encode_layoutreturn(struct xdr_stream *xdr,
2021 const void *voidargs,
2022 const struct nfs4_xdr_opaque_data *ff_opaque)
2024 const struct nfs4_layoutreturn_args *args = voidargs;
2025 struct nfs4_flexfile_layoutreturn_args *ff_args = ff_opaque->data;
2026 struct xdr_buf tmp_buf = {
2029 .iov_base = page_address(ff_args->pages[0]),
2032 .buflen = PAGE_SIZE,
2034 struct xdr_stream tmp_xdr;
2037 dprintk("%s: Begin\n", __func__);
2039 xdr_init_encode(&tmp_xdr, &tmp_buf, NULL);
2041 ff_layout_encode_ioerr(&tmp_xdr, args, ff_args);
2042 ff_layout_encode_iostats_array(&tmp_xdr, args, ff_args);
2044 start = xdr_reserve_space(xdr, 4);
2045 *start = cpu_to_be32(tmp_buf.len);
2046 xdr_write_pages(xdr, ff_args->pages, 0, tmp_buf.len);
2048 dprintk("%s: Return\n", __func__);
2052 ff_layout_free_layoutreturn(struct nfs4_xdr_opaque_data *args)
2054 struct nfs4_flexfile_layoutreturn_args *ff_args;
2058 ff_args = args->data;
2061 ff_layout_free_ds_ioerr(&ff_args->errors);
2062 ff_layout_free_iostats_array(ff_args->devinfo, ff_args->num_dev);
2064 put_page(ff_args->pages[0]);
2068 static const struct nfs4_xdr_opaque_ops layoutreturn_ops = {
2069 .encode = ff_layout_encode_layoutreturn,
2070 .free = ff_layout_free_layoutreturn,
2074 ff_layout_prepare_layoutreturn(struct nfs4_layoutreturn_args *args)
2076 struct nfs4_flexfile_layoutreturn_args *ff_args;
2077 struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(args->layout);
2079 ff_args = kmalloc(sizeof(*ff_args), GFP_KERNEL);
2082 ff_args->pages[0] = alloc_page(GFP_KERNEL);
2083 if (!ff_args->pages[0])
2084 goto out_nomem_free;
2086 INIT_LIST_HEAD(&ff_args->errors);
2087 ff_args->num_errors = ff_layout_fetch_ds_ioerr(args->layout,
2088 &args->range, &ff_args->errors,
2089 FF_LAYOUTRETURN_MAXERR);
2091 spin_lock(&args->inode->i_lock);
2092 ff_args->num_dev = ff_layout_mirror_prepare_stats(&ff_layout->generic_hdr,
2093 &ff_args->devinfo[0], ARRAY_SIZE(ff_args->devinfo));
2094 spin_unlock(&args->inode->i_lock);
2096 args->ld_private->ops = &layoutreturn_ops;
2097 args->ld_private->data = ff_args;
2106 ff_layout_ntop4(const struct sockaddr *sap, char *buf, const size_t buflen)
2108 const struct sockaddr_in *sin = (struct sockaddr_in *)sap;
2110 return snprintf(buf, buflen, "%pI4", &sin->sin_addr);
2114 ff_layout_ntop6_noscopeid(const struct sockaddr *sap, char *buf,
2117 const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
2118 const struct in6_addr *addr = &sin6->sin6_addr;
2121 * RFC 4291, Section 2.2.2
2123 * Shorthanded ANY address
2125 if (ipv6_addr_any(addr))
2126 return snprintf(buf, buflen, "::");
2129 * RFC 4291, Section 2.2.2
2131 * Shorthanded loopback address
2133 if (ipv6_addr_loopback(addr))
2134 return snprintf(buf, buflen, "::1");
2137 * RFC 4291, Section 2.2.3
2139 * Special presentation address format for mapped v4
2142 if (ipv6_addr_v4mapped(addr))
2143 return snprintf(buf, buflen, "::ffff:%pI4",
2144 &addr->s6_addr32[3]);
2147 * RFC 4291, Section 2.2.1
2149 return snprintf(buf, buflen, "%pI6c", addr);
2152 /* Derived from rpc_sockaddr2uaddr */
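/*
 * Example (illustrative values): the DS address 192.0.2.1, port 2049 is
 * encoded as the netid "tcp" plus the universal address "192.0.2.1.8.1"
 * (the port is appended as its high and low bytes, 2049 == 8 * 256 + 1).
 */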
2154 ff_layout_encode_netaddr(struct xdr_stream *xdr, struct nfs4_pnfs_ds_addr *da)
2156 struct sockaddr *sap = (struct sockaddr *)&da->da_addr;
2157 char portbuf[RPCBIND_MAXUADDRPLEN];
2158 char addrbuf[RPCBIND_MAXUADDRLEN];
2160 unsigned short port;
2164 switch (sap->sa_family) {
2166 if (ff_layout_ntop4(sap, addrbuf, sizeof(addrbuf)) == 0)
2168 port = ntohs(((struct sockaddr_in *)sap)->sin_port);
2173 if (ff_layout_ntop6_noscopeid(sap, addrbuf, sizeof(addrbuf)) == 0)
2175 port = ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
2180 /* we only support tcp and tcp6 */
2185 snprintf(portbuf, sizeof(portbuf), ".%u.%u", port >> 8, port & 0xff);
2186 len = strlcat(addrbuf, portbuf, sizeof(addrbuf));
2188 p = xdr_reserve_space(xdr, 4 + netid_len);
2189 xdr_encode_opaque(p, netid, netid_len);
2191 p = xdr_reserve_space(xdr, 4 + len);
2192 xdr_encode_opaque(p, addrbuf, len);
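/* encode a ktime as an nfstime4: 64-bit seconds followed by a 32-bit
 * nanoseconds field (12 XDR bytes in total) */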
2196 ff_layout_encode_nfstime(struct xdr_stream *xdr,
2199 struct timespec64 ts;
2202 p = xdr_reserve_space(xdr, 12);
2203 ts = ktime_to_timespec64(t);
2204 p = xdr_encode_hyper(p, ts.tv_sec);
2205 *p++ = cpu_to_be32(ts.tv_nsec);
2209 ff_layout_encode_io_latency(struct xdr_stream *xdr,
2210 struct nfs4_ff_io_stat *stat)
2214 p = xdr_reserve_space(xdr, 5 * 8);
2215 p = xdr_encode_hyper(p, stat->ops_requested);
2216 p = xdr_encode_hyper(p, stat->bytes_requested);
2217 p = xdr_encode_hyper(p, stat->ops_completed);
2218 p = xdr_encode_hyper(p, stat->bytes_completed);
2219 p = xdr_encode_hyper(p, stat->bytes_not_delivered);
2220 ff_layout_encode_nfstime(xdr, stat->total_busy_time);
2221 ff_layout_encode_nfstime(xdr, stat->aggregate_completion_time);
2225 ff_layout_encode_ff_layoutupdate(struct xdr_stream *xdr,
2226 const struct nfs42_layoutstat_devinfo *devinfo,
2227 struct nfs4_ff_layout_mirror *mirror)
2229 struct nfs4_pnfs_ds_addr *da;
2230 struct nfs4_pnfs_ds *ds = mirror->mirror_ds->ds;
2231 struct nfs_fh *fh = &mirror->fh_versions[0];
2234 da = list_first_entry(&ds->ds_addrs, struct nfs4_pnfs_ds_addr, da_node);
2235 dprintk("%s: DS %s: encoding address %s\n",
2236 __func__, ds->ds_remotestr, da->da_remotestr);
2238 ff_layout_encode_netaddr(xdr, da);
2240 p = xdr_reserve_space(xdr, 4 + fh->size);
2241 xdr_encode_opaque(p, fh->data, fh->size);
2242 /* ff_io_latency4 read */
2243 spin_lock(&mirror->lock);
2244 ff_layout_encode_io_latency(xdr, &mirror->read_stat.io_stat);
2245 /* ff_io_latency4 write */
2246 ff_layout_encode_io_latency(xdr, &mirror->write_stat.io_stat);
2247 spin_unlock(&mirror->lock);
2249 ff_layout_encode_nfstime(xdr, ktime_sub(ktime_get(), mirror->start_time));
2251 p = xdr_reserve_space(xdr, 4);
2252 *p = cpu_to_be32(false);
2256 ff_layout_encode_layoutstats(struct xdr_stream *xdr, const void *args,
2257 const struct nfs4_xdr_opaque_data *opaque)
2259 struct nfs42_layoutstat_devinfo *devinfo = container_of(opaque,
2260 struct nfs42_layoutstat_devinfo, ld_private);
2263 /* layoutupdate length */
2264 start = xdr_reserve_space(xdr, 4);
2265 ff_layout_encode_ff_layoutupdate(xdr, devinfo, opaque->data);
2267 *start = cpu_to_be32((xdr->p - start - 1) * 4);
2271 ff_layout_free_layoutstats(struct nfs4_xdr_opaque_data *opaque)
2273 struct nfs4_ff_layout_mirror *mirror = opaque->data;
2275 ff_layout_put_mirror(mirror);
2278 static const struct nfs4_xdr_opaque_ops layoutstat_ops = {
2279 .encode = ff_layout_encode_layoutstats,
2280 .free = ff_layout_free_layoutstats,
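/*
 * Fill the devinfo array with a snapshot of the I/O counters of every
 * mirror that has fresh statistics, taking a reference on each mirror
 * reported (dropped again in the layoutstats .free callback), and return
 * the number of entries used, bounded by the caller-supplied limit.
 */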
2284 ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo,
2285 struct nfs42_layoutstat_devinfo *devinfo,
2288 struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(lo);
2289 struct nfs4_ff_layout_mirror *mirror;
2290 struct nfs4_deviceid_node *dev;
2293 list_for_each_entry(mirror, &ff_layout->mirrors, mirrors) {
2296 if (IS_ERR_OR_NULL(mirror->mirror_ds))
2298 if (!test_and_clear_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags))
2300 /* mirror refcount put in cleanup_layoutstats */
2301 if (!refcount_inc_not_zero(&mirror->ref))
2303 dev = &mirror->mirror_ds->id_node;
2304 memcpy(&devinfo->dev_id, &dev->deviceid, NFS4_DEVICEID4_SIZE);
2305 devinfo->offset = 0;
2306 devinfo->length = NFS4_MAX_UINT64;
2307 spin_lock(&mirror->lock);
2308 devinfo->read_count = mirror->read_stat.io_stat.ops_completed;
2309 devinfo->read_bytes = mirror->read_stat.io_stat.bytes_completed;
2310 devinfo->write_count = mirror->write_stat.io_stat.ops_completed;
2311 devinfo->write_bytes = mirror->write_stat.io_stat.bytes_completed;
2312 spin_unlock(&mirror->lock);
2313 devinfo->layout_type = LAYOUT_FLEX_FILES;
2314 devinfo->ld_private.ops = &layoutstat_ops;
2315 devinfo->ld_private.data = mirror;
2324 ff_layout_prepare_layoutstats(struct nfs42_layoutstat_args *args)
2326 struct nfs4_flexfile_layout *ff_layout;
2327 const int dev_count = PNFS_LAYOUTSTATS_MAXDEV;
2329 /* For now, send at most PNFS_LAYOUTSTATS_MAXDEV statistics */
2330 args->devinfo = kmalloc_array(dev_count, sizeof(*args->devinfo), GFP_NOIO);
2334 spin_lock(&args->inode->i_lock);
2335 ff_layout = FF_LAYOUT_FROM_HDR(NFS_I(args->inode)->layout);
2336 args->num_dev = ff_layout_mirror_prepare_stats(&ff_layout->generic_hdr,
2337 &args->devinfo[0], dev_count);
2338 spin_unlock(&args->inode->i_lock);
2339 if (!args->num_dev) {
2340 kfree(args->devinfo);
2341 args->devinfo = NULL;
2349 ff_layout_set_layoutdriver(struct nfs_server *server,
2350 const struct nfs_fh *dummy)
2352 #if IS_ENABLED(CONFIG_NFS_V4_2)
2353 server->caps |= NFS_CAP_LAYOUTSTATS;
2358 static struct pnfs_layoutdriver_type flexfilelayout_type = {
2359 .id = LAYOUT_FLEX_FILES,
2360 .name = "LAYOUT_FLEX_FILES",
2361 .owner = THIS_MODULE,
2362 .flags = PNFS_LAYOUTGET_ON_OPEN,
2363 .max_layoutget_response = 4096, /* 1 page or so... */
2364 .set_layoutdriver = ff_layout_set_layoutdriver,
2365 .alloc_layout_hdr = ff_layout_alloc_layout_hdr,
2366 .free_layout_hdr = ff_layout_free_layout_hdr,
2367 .alloc_lseg = ff_layout_alloc_lseg,
2368 .free_lseg = ff_layout_free_lseg,
2369 .add_lseg = ff_layout_add_lseg,
2370 .pg_read_ops = &ff_layout_pg_read_ops,
2371 .pg_write_ops = &ff_layout_pg_write_ops,
2372 .get_ds_info = ff_layout_get_ds_info,
2373 .free_deviceid_node = ff_layout_free_deviceid_node,
2374 .mark_request_commit = pnfs_layout_mark_request_commit,
2375 .clear_request_commit = pnfs_generic_clear_request_commit,
2376 .scan_commit_lists = pnfs_generic_scan_commit_lists,
2377 .recover_commit_reqs = pnfs_generic_recover_commit_reqs,
2378 .commit_pagelist = ff_layout_commit_pagelist,
2379 .read_pagelist = ff_layout_read_pagelist,
2380 .write_pagelist = ff_layout_write_pagelist,
2381 .alloc_deviceid_node = ff_layout_alloc_deviceid_node,
2382 .prepare_layoutreturn = ff_layout_prepare_layoutreturn,
2383 .sync = pnfs_nfs_generic_sync,
2384 .prepare_layoutstats = ff_layout_prepare_layoutstats,
2387 static int __init nfs4flexfilelayout_init(void)
2389 printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Registering...\n",
2391 return pnfs_register_layoutdriver(&flexfilelayout_type);
2394 static void __exit nfs4flexfilelayout_exit(void)
2396 printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Unregistering...\n",
2398 pnfs_unregister_layoutdriver(&flexfilelayout_type);
2401 MODULE_ALIAS("nfs-layouttype4-4");
2403 MODULE_LICENSE("GPL");
2404 MODULE_DESCRIPTION("The NFSv4 flexfile layout driver");
2406 module_init(nfs4flexfilelayout_init);
2407 module_exit(nfs4flexfilelayout_exit);