/*
 *  pNFS functions to call and manage layout drivers.
 *
 *  Copyright (c) 2002 [year of first publication]
 *  The Regents of the University of Michigan
 *  All Rights Reserved
 *
 *  Dean Hildebrand <dhildebz@umich.edu>
 *
 *  Permission is granted to use, copy, create derivative works, and
 *  redistribute this software and such derivative works for any purpose,
 *  so long as the name of the University of Michigan is not used in
 *  any advertising or publicity pertaining to the use or distribution
 *  of this software without specific, written prior authorization. If
 *  the above copyright notice or any other identification of the
 *  University of Michigan is included in any copy of any portion of
 *  this software, then the disclaimer below must also be included.
 *
 *  This software is provided as is, without representation or warranty
 *  of any kind either express or implied, including without limitation
 *  the implied warranties of merchantability, fitness for a particular
 *  purpose, or noninfringement.  The Regents of the University of
 *  Michigan shall not be liable for any damages, including special,
 *  indirect, incidental, or consequential damages, with respect to any
 *  claim arising out of or in connection with the use of the software,
 *  even if it has been or is hereafter advised of the possibility of
 *  such damages.
 */

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/module.h>
#include <linux/sort.h>
#include "internal.h"
#include "pnfs.h"
#include "iostat.h"
#include "nfs4trace.h"
#include "delegation.h"
#include "nfs42.h"
#include "nfs4_fs.h"

#define NFSDBG_FACILITY         NFSDBG_PNFS
#define PNFS_LAYOUTGET_RETRY_TIMEOUT (120*HZ)

/* Locking:
 *
 * pnfs_spinlock:
 *      protects pnfs_modules_tbl.
 */
static DEFINE_SPINLOCK(pnfs_spinlock);

/*
 * pnfs_modules_tbl holds all pnfs modules
 */
static LIST_HEAD(pnfs_modules_tbl);

static void pnfs_layoutreturn_before_put_layout_hdr(struct pnfs_layout_hdr *lo);
static void pnfs_free_returned_lsegs(struct pnfs_layout_hdr *lo,
                struct list_head *free_me,
                const struct pnfs_layout_range *range,
                u32 seq);
static bool pnfs_lseg_dec_and_remove_zero(struct pnfs_layout_segment *lseg,
                                struct list_head *tmp_list);

/* Return the registered pnfs layout driver module matching given id */
static struct pnfs_layoutdriver_type *
find_pnfs_driver_locked(u32 id)
{
        struct pnfs_layoutdriver_type *local;

        list_for_each_entry(local, &pnfs_modules_tbl, pnfs_tblid)
                if (local->id == id)
                        goto out;
        local = NULL;
out:
        dprintk("%s: Searching for id %u, found %p\n", __func__, id, local);
        return local;
}

static struct pnfs_layoutdriver_type *
find_pnfs_driver(u32 id)
{
        struct pnfs_layoutdriver_type *local;

        spin_lock(&pnfs_spinlock);
        local = find_pnfs_driver_locked(id);
        if (local != NULL && !try_module_get(local->owner)) {
                dprintk("%s: Could not grab reference on module\n", __func__);
                local = NULL;
        }
        spin_unlock(&pnfs_spinlock);
        return local;
}

void
unset_pnfs_layoutdriver(struct nfs_server *nfss)
{
        if (nfss->pnfs_curr_ld) {
                if (nfss->pnfs_curr_ld->clear_layoutdriver)
                        nfss->pnfs_curr_ld->clear_layoutdriver(nfss);
                /* Decrement the MDS count. Purge the deviceid cache if zero */
                if (atomic_dec_and_test(&nfss->nfs_client->cl_mds_count))
                        nfs4_deviceid_purge_client(nfss->nfs_client);
                module_put(nfss->pnfs_curr_ld->owner);
        }
        nfss->pnfs_curr_ld = NULL;
}

/*
 * When the server sends a list of layout types, we choose one in the order
 * given in the list below.
 *
 * FIXME: should this list be configurable in some fashion? module param?
 *        mount option? something else?
 */
static const u32 ld_prefs[] = {
        LAYOUT_SCSI,
        LAYOUT_BLOCK_VOLUME,
        LAYOUT_OSD2_OBJECTS,
        LAYOUT_FLEX_FILES,
        LAYOUT_NFSV4_1_FILES,
        0
};

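/*
 * sort() comparator: a layout type that appears earlier in ld_prefs sorts
 * before one that appears later; types not listed in ld_prefs at all
 * compare equal.
 */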
static int
ld_cmp(const void *e1, const void *e2)
{
        u32 ld1 = *((u32 *)e1);
        u32 ld2 = *((u32 *)e2);
        int i;

        for (i = 0; ld_prefs[i] != 0; i++) {
                if (ld1 == ld_prefs[i])
                        return -1;

                if (ld2 == ld_prefs[i])
                        return 1;
        }
        return 0;
}

/*
 * Try to set the server's pnfs module to the pnfs layout type specified by id.
 * Currently only one pNFS layout driver per filesystem is supported.
 *
 * @fsinfo: carries the array of layout types supported by the MDS.
 */
void
set_pnfs_layoutdriver(struct nfs_server *server, const struct nfs_fh *mntfh,
                      struct nfs_fsinfo *fsinfo)
{
        struct pnfs_layoutdriver_type *ld_type = NULL;
        u32 id;
        int i;

        if (fsinfo->nlayouttypes == 0)
                goto out_no_driver;
        if (!(server->nfs_client->cl_exchange_flags &
                 (EXCHGID4_FLAG_USE_NON_PNFS | EXCHGID4_FLAG_USE_PNFS_MDS))) {
                printk(KERN_ERR "NFS: %s: cl_exchange_flags 0x%x\n",
                        __func__, server->nfs_client->cl_exchange_flags);
                goto out_no_driver;
        }

        sort(fsinfo->layouttype, fsinfo->nlayouttypes,
                sizeof(*fsinfo->layouttype), ld_cmp, NULL);

        for (i = 0; i < fsinfo->nlayouttypes; i++) {
                id = fsinfo->layouttype[i];
                ld_type = find_pnfs_driver(id);
                if (!ld_type) {
                        request_module("%s-%u", LAYOUT_NFSV4_1_MODULE_PREFIX,
                                        id);
                        ld_type = find_pnfs_driver(id);
                }
                if (ld_type)
                        break;
        }

        if (!ld_type) {
                dprintk("%s: No pNFS module found!\n", __func__);
                goto out_no_driver;
        }

        server->pnfs_curr_ld = ld_type;
        if (ld_type->set_layoutdriver
            && ld_type->set_layoutdriver(server, mntfh)) {
                printk(KERN_ERR "NFS: %s: Error initializing pNFS layout "
                        "driver %u.\n", __func__, id);
                module_put(ld_type->owner);
                goto out_no_driver;
        }
        /* Bump the MDS count */
        atomic_inc(&server->nfs_client->cl_mds_count);

        dprintk("%s: pNFS module for %u set\n", __func__, id);
        return;

out_no_driver:
        dprintk("%s: Using NFSv4 I/O\n", __func__);
        server->pnfs_curr_ld = NULL;
}

int
pnfs_register_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
{
        int status = -EINVAL;
        struct pnfs_layoutdriver_type *tmp;

        if (ld_type->id == 0) {
                printk(KERN_ERR "NFS: %s id 0 is reserved\n", __func__);
                return status;
        }
        if (!ld_type->alloc_lseg || !ld_type->free_lseg) {
                printk(KERN_ERR "NFS: %s Layout driver must provide "
                       "alloc_lseg and free_lseg.\n", __func__);
                return status;
        }

        spin_lock(&pnfs_spinlock);
        tmp = find_pnfs_driver_locked(ld_type->id);
        if (!tmp) {
                list_add(&ld_type->pnfs_tblid, &pnfs_modules_tbl);
                status = 0;
                dprintk("%s Registering id:%u name:%s\n", __func__, ld_type->id,
                        ld_type->name);
        } else {
                printk(KERN_ERR "NFS: %s Module with id %u already loaded!\n",
                        __func__, ld_type->id);
        }
        spin_unlock(&pnfs_spinlock);

        return status;
}
EXPORT_SYMBOL_GPL(pnfs_register_layoutdriver);

void
pnfs_unregister_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
{
        dprintk("%s Deregistering id:%u\n", __func__, ld_type->id);
        spin_lock(&pnfs_spinlock);
        list_del(&ld_type->pnfs_tblid);
        spin_unlock(&pnfs_spinlock);
}
EXPORT_SYMBOL_GPL(pnfs_unregister_layoutdriver);

/*
 * pNFS client layout cache
 */

/* Need to hold i_lock if caller does not already hold reference */
void
pnfs_get_layout_hdr(struct pnfs_layout_hdr *lo)
{
        refcount_inc(&lo->plh_refcount);
}

static struct pnfs_layout_hdr *
pnfs_alloc_layout_hdr(struct inode *ino, gfp_t gfp_flags)
{
        struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld;
        return ld->alloc_layout_hdr(ino, gfp_flags);
}

static void
pnfs_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
        struct nfs_server *server = NFS_SERVER(lo->plh_inode);
        struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld;

        if (!list_empty(&lo->plh_layouts)) {
                struct nfs_client *clp = server->nfs_client;

                spin_lock(&clp->cl_lock);
                list_del_init(&lo->plh_layouts);
                spin_unlock(&clp->cl_lock);
        }
        put_rpccred(lo->plh_lc_cred);
        return ld->free_layout_hdr(lo);
}

static void
pnfs_detach_layout_hdr(struct pnfs_layout_hdr *lo)
{
        struct nfs_inode *nfsi = NFS_I(lo->plh_inode);
        dprintk("%s: freeing layout cache %p\n", __func__, lo);
        nfsi->layout = NULL;
        /* Reset MDS Threshold I/O counters */
        nfsi->write_io = 0;
        nfsi->read_io = 0;
}

void
pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo)
{
        struct inode *inode;

        if (!lo)
                return;
        inode = lo->plh_inode;
        pnfs_layoutreturn_before_put_layout_hdr(lo);

        if (refcount_dec_and_lock(&lo->plh_refcount, &inode->i_lock)) {
                if (!list_empty(&lo->plh_segs))
                        WARN_ONCE(1, "NFS: BUG unfreed layout segments.\n");
                pnfs_detach_layout_hdr(lo);
                spin_unlock(&inode->i_lock);
                pnfs_free_layout_hdr(lo);
        }
}

static void
pnfs_set_plh_return_info(struct pnfs_layout_hdr *lo, enum pnfs_iomode iomode,
                         u32 seq)
{
        if (lo->plh_return_iomode != 0 && lo->plh_return_iomode != iomode)
                iomode = IOMODE_ANY;
        lo->plh_return_iomode = iomode;
        set_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags);
        if (seq != 0) {
                WARN_ON_ONCE(lo->plh_return_seq != 0 && lo->plh_return_seq != seq);
                lo->plh_return_seq = seq;
        }
}

static void
pnfs_clear_layoutreturn_info(struct pnfs_layout_hdr *lo)
{
        struct pnfs_layout_segment *lseg;
        lo->plh_return_iomode = 0;
        lo->plh_return_seq = 0;
        clear_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags);
        list_for_each_entry(lseg, &lo->plh_segs, pls_list) {
                if (!test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags))
                        continue;
                pnfs_set_plh_return_info(lo, lseg->pls_range.iomode, 0);
        }
}

static void pnfs_clear_layoutreturn_waitbit(struct pnfs_layout_hdr *lo)
{
        clear_bit_unlock(NFS_LAYOUT_RETURN, &lo->plh_flags);
        clear_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags);
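        /* Make the flag updates visible before waking up any waiters */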
        smp_mb__after_atomic();
        wake_up_bit(&lo->plh_flags, NFS_LAYOUT_RETURN);
        rpc_wake_up(&NFS_SERVER(lo->plh_inode)->roc_rpcwaitq);
}

static void
pnfs_clear_lseg_state(struct pnfs_layout_segment *lseg,
                struct list_head *free_me)
{
        clear_bit(NFS_LSEG_ROC, &lseg->pls_flags);
        clear_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags);
        if (test_and_clear_bit(NFS_LSEG_VALID, &lseg->pls_flags))
                pnfs_lseg_dec_and_remove_zero(lseg, free_me);
        if (test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
                pnfs_lseg_dec_and_remove_zero(lseg, free_me);
}

/*
 * Update the seqid of a layout stateid
 */
bool nfs4_layoutreturn_refresh_stateid(nfs4_stateid *dst,
                struct pnfs_layout_range *dst_range,
                struct inode *inode)
{
        struct pnfs_layout_hdr *lo;
        struct pnfs_layout_range range = {
                .iomode = IOMODE_ANY,
                .offset = 0,
                .length = NFS4_MAX_UINT64,
        };
        bool ret = false;
        LIST_HEAD(head);
        int err;

        spin_lock(&inode->i_lock);
        lo = NFS_I(inode)->layout;
        if (lo && nfs4_stateid_match_other(dst, &lo->plh_stateid)) {
                err = pnfs_mark_matching_lsegs_return(lo, &head, &range, 0);
                if (err != -EBUSY) {
                        dst->seqid = lo->plh_stateid.seqid;
                        *dst_range = range;
                        ret = true;
                }
        }
        spin_unlock(&inode->i_lock);
        pnfs_free_lseg_list(&head);
        return ret;
}

/*
 * Mark a pnfs_layout_hdr and all associated layout segments as invalid
 *
 * In order to continue using the pnfs_layout_hdr, a full recovery
 * is required.
 * Note that caller must hold inode->i_lock.
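 *
 * Returns true if any layout segments could not be freed immediately
 * because they are still pinned by in-flight I/O.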
 */
int
pnfs_mark_layout_stateid_invalid(struct pnfs_layout_hdr *lo,
                struct list_head *lseg_list)
{
        struct pnfs_layout_range range = {
                .iomode = IOMODE_ANY,
                .offset = 0,
                .length = NFS4_MAX_UINT64,
        };
        struct pnfs_layout_segment *lseg, *next;

        set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
        list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
                pnfs_clear_lseg_state(lseg, lseg_list);
        pnfs_clear_layoutreturn_info(lo);
        pnfs_free_returned_lsegs(lo, lseg_list, &range, 0);
        if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags) &&
            !test_and_set_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags))
                pnfs_clear_layoutreturn_waitbit(lo);
        return !list_empty(&lo->plh_segs);
}

static int
pnfs_iomode_to_fail_bit(u32 iomode)
{
        return iomode == IOMODE_RW ?
                NFS_LAYOUT_RW_FAILED : NFS_LAYOUT_RO_FAILED;
}

static void
pnfs_layout_set_fail_bit(struct pnfs_layout_hdr *lo, int fail_bit)
{
        lo->plh_retry_timestamp = jiffies;
        if (!test_and_set_bit(fail_bit, &lo->plh_flags))
                refcount_inc(&lo->plh_refcount);
}

static void
pnfs_layout_clear_fail_bit(struct pnfs_layout_hdr *lo, int fail_bit)
{
        if (test_and_clear_bit(fail_bit, &lo->plh_flags))
                refcount_dec(&lo->plh_refcount);
}

static void
pnfs_layout_io_set_failed(struct pnfs_layout_hdr *lo, u32 iomode)
{
        struct inode *inode = lo->plh_inode;
        struct pnfs_layout_range range = {
                .iomode = iomode,
                .offset = 0,
                .length = NFS4_MAX_UINT64,
        };
        LIST_HEAD(head);

        spin_lock(&inode->i_lock);
        pnfs_layout_set_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode));
        pnfs_mark_matching_lsegs_invalid(lo, &head, &range, 0);
        spin_unlock(&inode->i_lock);
        pnfs_free_lseg_list(&head);
        dprintk("%s Setting layout IOMODE_%s fail bit\n", __func__,
                        iomode == IOMODE_RW ?  "RW" : "READ");
}

static bool
pnfs_layout_io_test_failed(struct pnfs_layout_hdr *lo, u32 iomode)
{
        unsigned long start, end;
        int fail_bit = pnfs_iomode_to_fail_bit(iomode);

        if (test_bit(fail_bit, &lo->plh_flags) == 0)
                return false;
        end = jiffies;
        start = end - PNFS_LAYOUTGET_RETRY_TIMEOUT;
        if (!time_in_range(lo->plh_retry_timestamp, start, end)) {
                /* It is time to retry the failed layoutgets */
                pnfs_layout_clear_fail_bit(lo, fail_bit);
                return false;
        }
        return true;
}

static void
pnfs_init_lseg(struct pnfs_layout_hdr *lo, struct pnfs_layout_segment *lseg,
                const struct pnfs_layout_range *range,
                const nfs4_stateid *stateid)
{
        INIT_LIST_HEAD(&lseg->pls_list);
        INIT_LIST_HEAD(&lseg->pls_lc_list);
        refcount_set(&lseg->pls_refcount, 1);
        set_bit(NFS_LSEG_VALID, &lseg->pls_flags);
        lseg->pls_layout = lo;
        lseg->pls_range = *range;
        lseg->pls_seq = be32_to_cpu(stateid->seqid);
}

static void pnfs_free_lseg(struct pnfs_layout_segment *lseg)
{
        if (lseg != NULL) {
                struct inode *inode = lseg->pls_layout->plh_inode;
                NFS_SERVER(inode)->pnfs_curr_ld->free_lseg(lseg);
        }
}

static void
pnfs_layout_remove_lseg(struct pnfs_layout_hdr *lo,
                struct pnfs_layout_segment *lseg)
{
        WARN_ON(test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
        list_del_init(&lseg->pls_list);
        /* Matched by pnfs_get_layout_hdr in pnfs_layout_insert_lseg */
        refcount_dec(&lo->plh_refcount);
        if (test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags))
                return;
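        /*
         * If this was the last segment and no layoutreturn is pending,
         * the stateid no longer covers any layout: mark it invalid if no
         * layoutgets are outstanding, and clear any stale bulk-recall state.
         */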
        if (list_empty(&lo->plh_segs) &&
            !test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags) &&
            !test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) {
                if (atomic_read(&lo->plh_outstanding) == 0)
                        set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
                clear_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
        }
}

static bool
pnfs_cache_lseg_for_layoutreturn(struct pnfs_layout_hdr *lo,
                struct pnfs_layout_segment *lseg)
{
        if (test_and_clear_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags) &&
            pnfs_layout_is_valid(lo)) {
                pnfs_set_plh_return_info(lo, lseg->pls_range.iomode, 0);
                list_move_tail(&lseg->pls_list, &lo->plh_return_segs);
                return true;
        }
        return false;
}

void
pnfs_put_lseg(struct pnfs_layout_segment *lseg)
{
        struct pnfs_layout_hdr *lo;
        struct inode *inode;

        if (!lseg)
                return;

        dprintk("%s: lseg %p ref %d valid %d\n", __func__, lseg,
                refcount_read(&lseg->pls_refcount),
                test_bit(NFS_LSEG_VALID, &lseg->pls_flags));

        lo = lseg->pls_layout;
        inode = lo->plh_inode;

        if (refcount_dec_and_lock(&lseg->pls_refcount, &inode->i_lock)) {
                if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
                        spin_unlock(&inode->i_lock);
                        return;
                }
                pnfs_get_layout_hdr(lo);
                pnfs_layout_remove_lseg(lo, lseg);
                if (pnfs_cache_lseg_for_layoutreturn(lo, lseg))
                        lseg = NULL;
                spin_unlock(&inode->i_lock);
                pnfs_free_lseg(lseg);
                pnfs_put_layout_hdr(lo);
        }
}
EXPORT_SYMBOL_GPL(pnfs_put_lseg);

/*
 * is l2 fully contained in l1?
 *   start1                             end1
 *   [----------------------------------)
 *           start2           end2
 *           [----------------)
 */
static bool
pnfs_lseg_range_contained(const struct pnfs_layout_range *l1,
                 const struct pnfs_layout_range *l2)
{
        u64 start1 = l1->offset;
        u64 end1 = pnfs_end_offset(start1, l1->length);
        u64 start2 = l2->offset;
        u64 end2 = pnfs_end_offset(start2, l2->length);

        return (start1 <= start2) && (end1 >= end2);
}

static bool pnfs_lseg_dec_and_remove_zero(struct pnfs_layout_segment *lseg,
                struct list_head *tmp_list)
{
        if (!refcount_dec_and_test(&lseg->pls_refcount))
                return false;
        pnfs_layout_remove_lseg(lseg->pls_layout, lseg);
        list_add(&lseg->pls_list, tmp_list);
        return true;
}

/* Returns 1 if lseg is removed from list, 0 otherwise */
static int mark_lseg_invalid(struct pnfs_layout_segment *lseg,
                             struct list_head *tmp_list)
{
        int rv = 0;

        if (test_and_clear_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
                /* Remove the reference keeping the lseg in the
                 * list.  It will now be removed when all
                 * outstanding io is finished.
                 */
                dprintk("%s: lseg %p ref %d\n", __func__, lseg,
                        refcount_read(&lseg->pls_refcount));
                if (pnfs_lseg_dec_and_remove_zero(lseg, tmp_list))
                        rv = 1;
        }
        return rv;
}

/*
 * Compare 2 layout stateid sequence ids, to see which is newer,
 * taking into account wraparound issues.
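 * E.g. with s1 == 1 and s2 == 0xffffffff, (s32)(s1 - s2) == 2 > 0,
 * so seqid 1 is correctly treated as newer across the wrap.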
 */
static bool pnfs_seqid_is_newer(u32 s1, u32 s2)
{
        return (s32)(s1 - s2) > 0;
}

static bool
pnfs_should_free_range(const struct pnfs_layout_range *lseg_range,
                 const struct pnfs_layout_range *recall_range)
{
        return (recall_range->iomode == IOMODE_ANY ||
                lseg_range->iomode == recall_range->iomode) &&
               pnfs_lseg_range_intersecting(lseg_range, recall_range);
}

static bool
pnfs_match_lseg_recall(const struct pnfs_layout_segment *lseg,
                const struct pnfs_layout_range *recall_range,
                u32 seq)
{
        if (seq != 0 && pnfs_seqid_is_newer(lseg->pls_seq, seq))
                return false;
        if (recall_range == NULL)
                return true;
        return pnfs_should_free_range(&lseg->pls_range, recall_range);
}

/**
 * pnfs_mark_matching_lsegs_invalid - tear down lsegs or mark them for later
 * @lo: layout header containing the lsegs
 * @tmp_list: list head where doomed lsegs should go
 * @recall_range: optional recall range argument to match (may be NULL)
 * @seq: only invalidate lsegs obtained prior to this sequence (may be 0)
 *
 * Walk the list of lsegs in the layout header, and tear down any that should
 * be destroyed. If "recall_range" is specified then the segment must match
 * that range. If "seq" is non-zero, then only match segments that were handed
 * out at or before that sequence.
 *
 * Returns the number of matching lsegs that are still in use and therefore
 * remain on the list after the scan has purged the rest.
 */
int
pnfs_mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
                            struct list_head *tmp_list,
                            const struct pnfs_layout_range *recall_range,
                            u32 seq)
{
        struct pnfs_layout_segment *lseg, *next;
        int remaining = 0;

        dprintk("%s:Begin lo %p\n", __func__, lo);

        if (list_empty(&lo->plh_segs))
                return 0;
        list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
                if (pnfs_match_lseg_recall(lseg, recall_range, seq)) {
                        dprintk("%s: freeing lseg %p iomode %d seq %u "
                                "offset %llu length %llu\n", __func__,
                                lseg, lseg->pls_range.iomode, lseg->pls_seq,
                                lseg->pls_range.offset, lseg->pls_range.length);
                        if (!mark_lseg_invalid(lseg, tmp_list))
                                remaining++;
                }
        dprintk("%s:Return %i\n", __func__, remaining);
        return remaining;
}

static void
pnfs_free_returned_lsegs(struct pnfs_layout_hdr *lo,
                struct list_head *free_me,
                const struct pnfs_layout_range *range,
                u32 seq)
{
        struct pnfs_layout_segment *lseg, *next;

        list_for_each_entry_safe(lseg, next, &lo->plh_return_segs, pls_list) {
                if (pnfs_match_lseg_recall(lseg, range, seq))
                        list_move_tail(&lseg->pls_list, free_me);
        }
}

/* note free_me must contain lsegs from a single layout_hdr */
void
pnfs_free_lseg_list(struct list_head *free_me)
{
        struct pnfs_layout_segment *lseg, *tmp;

        if (list_empty(free_me))
                return;

        list_for_each_entry_safe(lseg, tmp, free_me, pls_list) {
                list_del(&lseg->pls_list);
                pnfs_free_lseg(lseg);
        }
}

void
pnfs_destroy_layout(struct nfs_inode *nfsi)
{
        struct pnfs_layout_hdr *lo;
        LIST_HEAD(tmp_list);

        spin_lock(&nfsi->vfs_inode.i_lock);
        lo = nfsi->layout;
        if (lo) {
                pnfs_get_layout_hdr(lo);
                pnfs_mark_layout_stateid_invalid(lo, &tmp_list);
                pnfs_layout_clear_fail_bit(lo, NFS_LAYOUT_RO_FAILED);
                pnfs_layout_clear_fail_bit(lo, NFS_LAYOUT_RW_FAILED);
                spin_unlock(&nfsi->vfs_inode.i_lock);
                pnfs_free_lseg_list(&tmp_list);
                nfs_commit_inode(&nfsi->vfs_inode, 0);
                pnfs_put_layout_hdr(lo);
        } else
                spin_unlock(&nfsi->vfs_inode.i_lock);
}
EXPORT_SYMBOL_GPL(pnfs_destroy_layout);

static bool
pnfs_layout_add_bulk_destroy_list(struct inode *inode,
                struct list_head *layout_list)
{
        struct pnfs_layout_hdr *lo;
        bool ret = false;

        spin_lock(&inode->i_lock);
        lo = NFS_I(inode)->layout;
        if (lo != NULL && list_empty(&lo->plh_bulk_destroy)) {
                pnfs_get_layout_hdr(lo);
                list_add(&lo->plh_bulk_destroy, layout_list);
                ret = true;
        }
        spin_unlock(&inode->i_lock);
        return ret;
}

/* Caller must hold rcu_read_lock and clp->cl_lock */
static int
pnfs_layout_bulk_destroy_byserver_locked(struct nfs_client *clp,
                struct nfs_server *server,
                struct list_head *layout_list)
{
        struct pnfs_layout_hdr *lo, *next;
        struct inode *inode;

        list_for_each_entry_safe(lo, next, &server->layouts, plh_layouts) {
                if (test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags))
                        continue;
                inode = igrab(lo->plh_inode);
                if (inode == NULL)
                        continue;
                list_del_init(&lo->plh_layouts);
                if (pnfs_layout_add_bulk_destroy_list(inode, layout_list))
                        continue;
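                /*
                 * The layout could not be queued for bulk destroy (it is
                 * gone or already queued elsewhere), so drop the locks
                 * (iput() may sleep), release the inode reference taken
                 * above, and have the caller restart the scan.
                 */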
                rcu_read_unlock();
                spin_unlock(&clp->cl_lock);
                iput(inode);
                spin_lock(&clp->cl_lock);
                rcu_read_lock();
                return -EAGAIN;
        }
        return 0;
}

static int
pnfs_layout_free_bulk_destroy_list(struct list_head *layout_list,
                bool is_bulk_recall)
{
        struct pnfs_layout_hdr *lo;
        struct inode *inode;
        LIST_HEAD(lseg_list);
        int ret = 0;

        while (!list_empty(layout_list)) {
                lo = list_entry(layout_list->next, struct pnfs_layout_hdr,
                                plh_bulk_destroy);
                dprintk("%s freeing layout for inode %lu\n", __func__,
                        lo->plh_inode->i_ino);
                inode = lo->plh_inode;

                pnfs_layoutcommit_inode(inode, false);

                spin_lock(&inode->i_lock);
                list_del_init(&lo->plh_bulk_destroy);
                if (pnfs_mark_layout_stateid_invalid(lo, &lseg_list)) {
                        if (is_bulk_recall)
                                set_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
                        ret = -EAGAIN;
                }
                spin_unlock(&inode->i_lock);
                pnfs_free_lseg_list(&lseg_list);
                /* Free all lsegs that are attached to commit buckets */
                nfs_commit_inode(inode, 0);
                pnfs_put_layout_hdr(lo);
                iput(inode);
        }
        return ret;
}

int
pnfs_destroy_layouts_byfsid(struct nfs_client *clp,
                struct nfs_fsid *fsid,
                bool is_recall)
{
        struct nfs_server *server;
        LIST_HEAD(layout_list);

        spin_lock(&clp->cl_lock);
        rcu_read_lock();
restart:
        list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
                if (memcmp(&server->fsid, fsid, sizeof(*fsid)) != 0)
                        continue;
                if (pnfs_layout_bulk_destroy_byserver_locked(clp,
                                server,
                                &layout_list) != 0)
                        goto restart;
        }
        rcu_read_unlock();
        spin_unlock(&clp->cl_lock);

        if (list_empty(&layout_list))
                return 0;
        return pnfs_layout_free_bulk_destroy_list(&layout_list, is_recall);
}

int
pnfs_destroy_layouts_byclid(struct nfs_client *clp,
                bool is_recall)
{
        struct nfs_server *server;
        LIST_HEAD(layout_list);

        spin_lock(&clp->cl_lock);
        rcu_read_lock();
restart:
        list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
                if (pnfs_layout_bulk_destroy_byserver_locked(clp,
                                        server,
                                        &layout_list) != 0)
                        goto restart;
        }
        rcu_read_unlock();
        spin_unlock(&clp->cl_lock);

        if (list_empty(&layout_list))
                return 0;
        return pnfs_layout_free_bulk_destroy_list(&layout_list, is_recall);
}

/*
 * Called by the state manager to remove all layouts established under an
 * expired lease.
 */
void
pnfs_destroy_all_layouts(struct nfs_client *clp)
{
        nfs4_deviceid_mark_client_invalid(clp);
        nfs4_deviceid_purge_client(clp);

        pnfs_destroy_layouts_byclid(clp, false);
}

/* update lo->plh_stateid with new if it is more recent */
void
pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new,
                        bool update_barrier)
{
        u32 oldseq, newseq, new_barrier = 0;

        oldseq = be32_to_cpu(lo->plh_stateid.seqid);
        newseq = be32_to_cpu(new->seqid);

        if (!pnfs_layout_is_valid(lo)) {
                nfs4_stateid_copy(&lo->plh_stateid, new);
                lo->plh_barrier = newseq;
                pnfs_clear_layoutreturn_info(lo);
                clear_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
                return;
        }
        if (pnfs_seqid_is_newer(newseq, oldseq)) {
                nfs4_stateid_copy(&lo->plh_stateid, new);
                /*
                 * Because of wraparound, we want to keep the barrier
                 * "close" to the current seqids.
                 */
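                /*
                 * In-flight LAYOUTGETs will come back with seqids between
                 * the barrier and newseq; trailing the barrier by
                 * plh_outstanding keeps pnfs_layout_stateid_blocked() from
                 * rejecting those replies as old.
                 */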
                new_barrier = newseq - atomic_read(&lo->plh_outstanding);
        }
        if (update_barrier)
                new_barrier = be32_to_cpu(new->seqid);
        else if (new_barrier == 0)
                return;
        if (pnfs_seqid_is_newer(new_barrier, lo->plh_barrier))
                lo->plh_barrier = new_barrier;
}

static bool
pnfs_layout_stateid_blocked(const struct pnfs_layout_hdr *lo,
                const nfs4_stateid *stateid)
{
        u32 seqid = be32_to_cpu(stateid->seqid);

        return !pnfs_seqid_is_newer(seqid, lo->plh_barrier);
}

/* Return true if the layout is currently blocking new layoutgets */
static bool
pnfs_layoutgets_blocked(const struct pnfs_layout_hdr *lo)
{
        return lo->plh_block_lgets ||
                test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
}

static struct nfs_server *
pnfs_find_server(struct inode *inode, struct nfs_open_context *ctx)
{
        struct nfs_server *server;

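        /*
         * With no inode (e.g. a LAYOUTGET issued as part of OPEN), fall
         * back to the server of the open context's parent directory.
         */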
        if (inode) {
                server = NFS_SERVER(inode);
        } else {
                struct dentry *parent_dir = dget_parent(ctx->dentry);
                server = NFS_SERVER(parent_dir->d_inode);
                dput(parent_dir);
        }
        return server;
}

static void nfs4_free_pages(struct page **pages, size_t size)
{
        int i;

        if (!pages)
                return;

        for (i = 0; i < size; i++) {
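                /* The array may be only partially populated if the
                 * allocation in nfs4_alloc_pages() failed midway; stop
                 * at the first NULL entry.
                 */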
                if (!pages[i])
                        break;
                __free_page(pages[i]);
        }
        kfree(pages);
}

static struct page **nfs4_alloc_pages(size_t size, gfp_t gfp_flags)
{
        struct page **pages;
        int i;

        pages = kcalloc(size, sizeof(struct page *), gfp_flags);
        if (!pages) {
                dprintk("%s: can't alloc array of %zu pages\n", __func__, size);
                return NULL;
        }

        for (i = 0; i < size; i++) {
                pages[i] = alloc_page(gfp_flags);
                if (!pages[i]) {
                        dprintk("%s: failed to allocate page\n", __func__);
                        nfs4_free_pages(pages, size);
                        return NULL;
                }
        }

        return pages;
}

static struct nfs4_layoutget *
pnfs_alloc_init_layoutget_args(struct inode *ino,
           struct nfs_open_context *ctx,
           const nfs4_stateid *stateid,
           const struct pnfs_layout_range *range,
           gfp_t gfp_flags)
{
        struct nfs_server *server = pnfs_find_server(ino, ctx);
        size_t max_pages = max_response_pages(server);
        struct nfs4_layoutget *lgp;

        dprintk("--> %s\n", __func__);

        lgp = kzalloc(sizeof(*lgp), gfp_flags);
        if (lgp == NULL)
                return NULL;

        lgp->args.layout.pages = nfs4_alloc_pages(max_pages, gfp_flags);
        if (!lgp->args.layout.pages) {
                kfree(lgp);
                return NULL;
        }
        lgp->args.layout.pglen = max_pages * PAGE_SIZE;
        lgp->res.layoutp = &lgp->args.layout;

        /* Don't confuse uninitialised result and success */
        lgp->res.status = -NFS4ERR_DELAY;

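        /*
         * Ask for at least a page of layout, but never more than the
         * caller requested; for reads, clamp further so we don't request
         * data beyond the current end of file.
         */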
        lgp->args.minlength = PAGE_SIZE;
        if (lgp->args.minlength > range->length)
                lgp->args.minlength = range->length;
        if (ino) {
                loff_t i_size = i_size_read(ino);

                if (range->iomode == IOMODE_READ) {
                        if (range->offset >= i_size)
                                lgp->args.minlength = 0;
                        else if (i_size - range->offset < lgp->args.minlength)
                                lgp->args.minlength = i_size - range->offset;
                }
        }
        lgp->args.maxcount = PNFS_LAYOUT_MAXSIZE;
        pnfs_copy_range(&lgp->args.range, range);
        lgp->args.type = server->pnfs_curr_ld->id;
        lgp->args.inode = ino;
        lgp->args.ctx = get_nfs_open_context(ctx);
        nfs4_stateid_copy(&lgp->args.stateid, stateid);
        lgp->gfp_flags = gfp_flags;
        lgp->cred = get_rpccred(ctx->cred);
        return lgp;
}

void pnfs_layoutget_free(struct nfs4_layoutget *lgp)
{
        size_t max_pages = lgp->args.layout.pglen / PAGE_SIZE;

        nfs4_free_pages(lgp->args.layout.pages, max_pages);
        if (lgp->args.inode)
                pnfs_put_layout_hdr(NFS_I(lgp->args.inode)->layout);
        put_rpccred(lgp->cred);
        put_nfs_open_context(lgp->args.ctx);
        kfree(lgp);
}

static void pnfs_clear_layoutcommit(struct inode *inode,
                struct list_head *head)
{
        struct nfs_inode *nfsi = NFS_I(inode);
        struct pnfs_layout_segment *lseg, *tmp;

        if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
                return;
        list_for_each_entry_safe(lseg, tmp, &nfsi->layout->plh_segs, pls_list) {
                if (!test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
                        continue;
                pnfs_lseg_dec_and_remove_zero(lseg, head);
        }
}

void pnfs_layoutreturn_free_lsegs(struct pnfs_layout_hdr *lo,
                const nfs4_stateid *arg_stateid,
                const struct pnfs_layout_range *range,
                const nfs4_stateid *stateid)
{
        struct inode *inode = lo->plh_inode;
        LIST_HEAD(freeme);

        spin_lock(&inode->i_lock);
        if (!pnfs_layout_is_valid(lo) || !arg_stateid ||
            !nfs4_stateid_match_other(&lo->plh_stateid, arg_stateid))
                goto out_unlock;
        if (stateid) {
                u32 seq = be32_to_cpu(arg_stateid->seqid);

                pnfs_mark_matching_lsegs_invalid(lo, &freeme, range, seq);
                pnfs_free_returned_lsegs(lo, &freeme, range, seq);
                pnfs_set_layout_stateid(lo, stateid, true);
        } else
                pnfs_mark_layout_stateid_invalid(lo, &freeme);
out_unlock:
        pnfs_clear_layoutreturn_waitbit(lo);
        spin_unlock(&inode->i_lock);
        pnfs_free_lseg_list(&freeme);
}

static bool
pnfs_prepare_layoutreturn(struct pnfs_layout_hdr *lo,
                nfs4_stateid *stateid,
                enum pnfs_iomode *iomode)
{
        /* Serialise LAYOUTGET/LAYOUTRETURN */
        if (atomic_read(&lo->plh_outstanding) != 0)
                return false;
        if (test_and_set_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags))
                return false;
        set_bit(NFS_LAYOUT_RETURN, &lo->plh_flags);
        pnfs_get_layout_hdr(lo);
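        /*
         * If a partial return was requested, return just that range and
         * sequence; otherwise return everything under the current stateid.
         */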
        if (test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags)) {
                if (stateid != NULL) {
                        nfs4_stateid_copy(stateid, &lo->plh_stateid);
                        if (lo->plh_return_seq != 0)
                                stateid->seqid = cpu_to_be32(lo->plh_return_seq);
                }
                if (iomode != NULL)
                        *iomode = lo->plh_return_iomode;
                pnfs_clear_layoutreturn_info(lo);
                return true;
        }
        if (stateid != NULL)
                nfs4_stateid_copy(stateid, &lo->plh_stateid);
        if (iomode != NULL)
                *iomode = IOMODE_ANY;
        return true;
}

static void
pnfs_init_layoutreturn_args(struct nfs4_layoutreturn_args *args,
                struct pnfs_layout_hdr *lo,
                const nfs4_stateid *stateid,
                enum pnfs_iomode iomode)
{
        struct inode *inode = lo->plh_inode;

        args->layout_type = NFS_SERVER(inode)->pnfs_curr_ld->id;
        args->inode = inode;
        args->range.iomode = iomode;
        args->range.offset = 0;
        args->range.length = NFS4_MAX_UINT64;
        args->layout = lo;
        nfs4_stateid_copy(&args->stateid, stateid);
}

static int
pnfs_send_layoutreturn(struct pnfs_layout_hdr *lo, const nfs4_stateid *stateid,
                       enum pnfs_iomode iomode, bool sync)
{
        struct inode *ino = lo->plh_inode;
        struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld;
        struct nfs4_layoutreturn *lrp;
        int status = 0;

        lrp = kzalloc(sizeof(*lrp), GFP_NOFS);
        if (unlikely(lrp == NULL)) {
                status = -ENOMEM;
                spin_lock(&ino->i_lock);
                pnfs_clear_layoutreturn_waitbit(lo);
                spin_unlock(&ino->i_lock);
                pnfs_put_layout_hdr(lo);
                goto out;
        }

        pnfs_init_layoutreturn_args(&lrp->args, lo, stateid, iomode);
        lrp->args.ld_private = &lrp->ld_private;
        lrp->clp = NFS_SERVER(ino)->nfs_client;
        lrp->cred = lo->plh_lc_cred;
        if (ld->prepare_layoutreturn)
                ld->prepare_layoutreturn(&lrp->args);

        status = nfs4_proc_layoutreturn(lrp, sync);
out:
        dprintk("<-- %s status: %d\n", __func__, status);
        return status;
}

/* Return true if layoutreturn is needed */
static bool
pnfs_layout_need_return(struct pnfs_layout_hdr *lo)
{
        struct pnfs_layout_segment *s;
        enum pnfs_iomode iomode;
        u32 seq;

        if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags))
                return false;

        seq = lo->plh_return_seq;
        iomode = lo->plh_return_iomode;

        /* Defer layoutreturn until all recalled lsegs are done */
        list_for_each_entry(s, &lo->plh_segs, pls_list) {
                if (seq && pnfs_seqid_is_newer(s->pls_seq, seq))
                        continue;
                if (iomode != IOMODE_ANY && s->pls_range.iomode != iomode)
                        continue;
                if (test_bit(NFS_LSEG_LAYOUTRETURN, &s->pls_flags))
                        return false;
        }

        return true;
}

static void pnfs_layoutreturn_before_put_layout_hdr(struct pnfs_layout_hdr *lo)
{
        struct inode *inode = lo->plh_inode;

        if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags))
                return;
        spin_lock(&inode->i_lock);
        if (pnfs_layout_need_return(lo)) {
                nfs4_stateid stateid;
                enum pnfs_iomode iomode;
                bool send;

                send = pnfs_prepare_layoutreturn(lo, &stateid, &iomode);
                spin_unlock(&inode->i_lock);
                if (send) {
                        /* Send an async layoutreturn so we don't deadlock */
                        pnfs_send_layoutreturn(lo, &stateid, iomode, false);
                }
        } else
                spin_unlock(&inode->i_lock);
}

/*
 * Initiates a LAYOUTRETURN(FILE), and removes the pnfs_layout_hdr
 * when the layout segment list is empty.
 *
 * Note that a pnfs_layout_hdr can exist with an empty layout segment
 * list when LAYOUTGET has failed, or when LAYOUTGET succeeded, but the
 * deviceid is marked invalid.
 */
int
_pnfs_return_layout(struct inode *ino)
{
        struct pnfs_layout_hdr *lo = NULL;
        struct nfs_inode *nfsi = NFS_I(ino);
        LIST_HEAD(tmp_list);
        nfs4_stateid stateid;
        int status = 0;
        bool send, valid_layout;

        dprintk("NFS: %s for inode %lu\n", __func__, ino->i_ino);

        spin_lock(&ino->i_lock);
        lo = nfsi->layout;
        if (!lo) {
                spin_unlock(&ino->i_lock);
                dprintk("NFS: %s no layout to return\n", __func__);
                goto out;
        }
        /* Reference matched in nfs4_layoutreturn_release */
        pnfs_get_layout_hdr(lo);
        /* Is there an outstanding layoutreturn ? */
        if (test_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags)) {
                spin_unlock(&ino->i_lock);
                if (wait_on_bit(&lo->plh_flags, NFS_LAYOUT_RETURN,
                                        TASK_UNINTERRUPTIBLE))
                        goto out_put_layout_hdr;
                spin_lock(&ino->i_lock);
        }
        valid_layout = pnfs_layout_is_valid(lo);
        pnfs_clear_layoutcommit(ino, &tmp_list);
        pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL, 0);

        if (NFS_SERVER(ino)->pnfs_curr_ld->return_range) {
                struct pnfs_layout_range range = {
                        .iomode         = IOMODE_ANY,
                        .offset         = 0,
                        .length         = NFS4_MAX_UINT64,
                };
                NFS_SERVER(ino)->pnfs_curr_ld->return_range(lo, &range);
        }

        /* Don't send a LAYOUTRETURN if list was initially empty */
        if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags) ||
                        !valid_layout) {
                spin_unlock(&ino->i_lock);
                dprintk("NFS: %s no layout segments to return\n", __func__);
                goto out_put_layout_hdr;
        }

        send = pnfs_prepare_layoutreturn(lo, &stateid, NULL);
        spin_unlock(&ino->i_lock);
        if (send)
                status = pnfs_send_layoutreturn(lo, &stateid, IOMODE_ANY, true);
out_put_layout_hdr:
        pnfs_free_lseg_list(&tmp_list);
        pnfs_put_layout_hdr(lo);
out:
        dprintk("<-- %s status: %d\n", __func__, status);
        return status;
}

int
pnfs_commit_and_return_layout(struct inode *inode)
{
        struct pnfs_layout_hdr *lo;
        int ret;

        spin_lock(&inode->i_lock);
        lo = NFS_I(inode)->layout;
        if (lo == NULL) {
                spin_unlock(&inode->i_lock);
                return 0;
        }
        pnfs_get_layout_hdr(lo);
        /* Block new layoutgets and read/write to ds */
        lo->plh_block_lgets++;
        spin_unlock(&inode->i_lock);
        filemap_fdatawait(inode->i_mapping);
        ret = pnfs_layoutcommit_inode(inode, true);
        if (ret == 0)
                ret = _pnfs_return_layout(inode);
        spin_lock(&inode->i_lock);
        lo->plh_block_lgets--;
        spin_unlock(&inode->i_lock);
        pnfs_put_layout_hdr(lo);
        return ret;
}

bool pnfs_roc(struct inode *ino,
                struct nfs4_layoutreturn_args *args,
                struct nfs4_layoutreturn_res *res,
                const struct rpc_cred *cred)
{
        struct nfs_inode *nfsi = NFS_I(ino);
        struct nfs_open_context *ctx;
        struct nfs4_state *state;
        struct pnfs_layout_hdr *lo;
        struct pnfs_layout_segment *lseg, *next;
        nfs4_stateid stateid;
        enum pnfs_iomode iomode = 0;
        bool layoutreturn = false, roc = false;
        bool skip_read = false;

        if (!nfs_have_layout(ino))
                return false;
retry:
        spin_lock(&ino->i_lock);
        lo = nfsi->layout;
        if (!lo || !pnfs_layout_is_valid(lo) ||
            test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
                lo = NULL;
                goto out_noroc;
        }
        pnfs_get_layout_hdr(lo);
        if (test_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags)) {
                spin_unlock(&ino->i_lock);
                wait_on_bit(&lo->plh_flags, NFS_LAYOUT_RETURN,
                                TASK_UNINTERRUPTIBLE);
                pnfs_put_layout_hdr(lo);
                goto retry;
        }

        /* no roc if we hold a delegation */
        if (nfs4_check_delegation(ino, FMODE_READ)) {
                if (nfs4_check_delegation(ino, FMODE_WRITE))
                        goto out_noroc;
                skip_read = true;
        }

        list_for_each_entry(ctx, &nfsi->open_files, list) {
                state = ctx->state;
                if (state == NULL)
                        continue;
                /* Don't return layout if there is open file state */
                if (state->state & FMODE_WRITE)
                        goto out_noroc;
                if (state->state & FMODE_READ)
                        skip_read = true;
        }

        list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list) {
                if (skip_read && lseg->pls_range.iomode == IOMODE_READ)
                        continue;
                /* If we are sending layoutreturn, invalidate all valid lsegs */
                if (!test_and_clear_bit(NFS_LSEG_ROC, &lseg->pls_flags))
                        continue;
                /*
                 * Note: mark lseg for return so pnfs_layout_remove_lseg
                 * doesn't invalidate the layout for us.
                 */
                set_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags);
                if (!mark_lseg_invalid(lseg, &lo->plh_return_segs))
                        continue;
                pnfs_set_plh_return_info(lo, lseg->pls_range.iomode, 0);
        }

        if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags))
                goto out_noroc;

        /* ROC in two conditions:
         * 1. there are ROC lsegs
         * 2. we don't send layoutreturn
         */
        /* lo ref dropped in pnfs_roc_release() */
        layoutreturn = pnfs_prepare_layoutreturn(lo, &stateid, &iomode);
        /* If the creds don't match, we can't compound the layoutreturn */
        if (!layoutreturn || cred != lo->plh_lc_cred)
                goto out_noroc;

        roc = layoutreturn;
        pnfs_init_layoutreturn_args(args, lo, &stateid, iomode);
        res->lrs_present = 0;
        layoutreturn = false;

out_noroc:
        spin_unlock(&ino->i_lock);
        pnfs_layoutcommit_inode(ino, true);
        if (roc) {
                struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld;
                if (ld->prepare_layoutreturn)
                        ld->prepare_layoutreturn(args);
                pnfs_put_layout_hdr(lo);
                return true;
        }
        if (layoutreturn)
                pnfs_send_layoutreturn(lo, &stateid, iomode, true);
        pnfs_put_layout_hdr(lo);
        return false;
}

void pnfs_roc_release(struct nfs4_layoutreturn_args *args,
                struct nfs4_layoutreturn_res *res,
                int ret)
{
        struct pnfs_layout_hdr *lo = args->layout;
        const nfs4_stateid *arg_stateid = NULL;
        const nfs4_stateid *res_stateid = NULL;
        struct nfs4_xdr_opaque_data *ld_private = args->ld_private;

        if (ret == 0) {
                arg_stateid = &args->stateid;
                if (res->lrs_present)
                        res_stateid = &res->stateid;
        }
        pnfs_layoutreturn_free_lsegs(lo, arg_stateid, &args->range,
                        res_stateid);
        if (ld_private && ld_private->ops && ld_private->ops->free)
                ld_private->ops->free(ld_private);
        pnfs_put_layout_hdr(lo);
        trace_nfs4_layoutreturn_on_close(args->inode, 0);
}

bool pnfs_wait_on_layoutreturn(struct inode *ino, struct rpc_task *task)
{
        struct nfs_inode *nfsi = NFS_I(ino);
        struct pnfs_layout_hdr *lo;
        bool sleep = false;

        /* we might not have grabbed lo reference. so need to check under
         * i_lock */
        spin_lock(&ino->i_lock);
        lo = nfsi->layout;
        if (lo && test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) {
                rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL);
                sleep = true;
        }
        spin_unlock(&ino->i_lock);
        return sleep;
}

/*
 * Compare two layout segments for sorting into layout cache.
 * We want to preferentially return RW over RO layouts, so ensure those
 * are seen first.
 */
static s64
pnfs_lseg_range_cmp(const struct pnfs_layout_range *l1,
           const struct pnfs_layout_range *l2)
{
        s64 d;

        /* high offset > low offset */
        d = l1->offset - l2->offset;
        if (d)
                return d;

        /* short length > long length */
        d = l2->length - l1->length;
        if (d)
                return d;

        /* read > read/write */
        return (int)(l1->iomode == IOMODE_READ) - (int)(l2->iomode == IOMODE_READ);
}

static bool
pnfs_lseg_range_is_after(const struct pnfs_layout_range *l1,
                const struct pnfs_layout_range *l2)
{
        return pnfs_lseg_range_cmp(l1, l2) > 0;
}

static bool
pnfs_lseg_no_merge(struct pnfs_layout_segment *lseg,
                struct pnfs_layout_segment *old)
{
        return false;
}

void
pnfs_generic_layout_insert_lseg(struct pnfs_layout_hdr *lo,
                   struct pnfs_layout_segment *lseg,
                   bool (*is_after)(const struct pnfs_layout_range *,
                           const struct pnfs_layout_range *),
                   bool (*do_merge)(struct pnfs_layout_segment *,
                           struct pnfs_layout_segment *),
                   struct list_head *free_me)
{
        struct pnfs_layout_segment *lp, *tmp;

        dprintk("%s:Begin\n", __func__);

        list_for_each_entry_safe(lp, tmp, &lo->plh_segs, pls_list) {
                if (test_bit(NFS_LSEG_VALID, &lp->pls_flags) == 0)
                        continue;
                if (do_merge(lseg, lp)) {
                        mark_lseg_invalid(lp, free_me);
                        continue;
                }
                if (is_after(&lseg->pls_range, &lp->pls_range))
                        continue;
                list_add_tail(&lseg->pls_list, &lp->pls_list);
                dprintk("%s: inserted lseg %p "
                        "iomode %d offset %llu length %llu before "
                        "lp %p iomode %d offset %llu length %llu\n",
                        __func__, lseg, lseg->pls_range.iomode,
                        lseg->pls_range.offset, lseg->pls_range.length,
                        lp, lp->pls_range.iomode, lp->pls_range.offset,
                        lp->pls_range.length);
                goto out;
        }
        list_add_tail(&lseg->pls_list, &lo->plh_segs);
        dprintk("%s: inserted lseg %p "
                "iomode %d offset %llu length %llu at tail\n",
                __func__, lseg, lseg->pls_range.iomode,
                lseg->pls_range.offset, lseg->pls_range.length);
out:
        pnfs_get_layout_hdr(lo);

        dprintk("%s:Return\n", __func__);
}
EXPORT_SYMBOL_GPL(pnfs_generic_layout_insert_lseg);

static void
pnfs_layout_insert_lseg(struct pnfs_layout_hdr *lo,
                   struct pnfs_layout_segment *lseg,
                   struct list_head *free_me)
{
        struct inode *inode = lo->plh_inode;
        struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;

        if (ld->add_lseg != NULL)
                ld->add_lseg(lo, lseg, free_me);
        else
                pnfs_generic_layout_insert_lseg(lo, lseg,
                                pnfs_lseg_range_is_after,
                                pnfs_lseg_no_merge,
                                free_me);
}

static struct pnfs_layout_hdr *
alloc_init_layout_hdr(struct inode *ino,
                      struct nfs_open_context *ctx,
                      gfp_t gfp_flags)
{
        struct pnfs_layout_hdr *lo;

        lo = pnfs_alloc_layout_hdr(ino, gfp_flags);
        if (!lo)
                return NULL;
        refcount_set(&lo->plh_refcount, 1);
        INIT_LIST_HEAD(&lo->plh_layouts);
        INIT_LIST_HEAD(&lo->plh_segs);
        INIT_LIST_HEAD(&lo->plh_return_segs);
        INIT_LIST_HEAD(&lo->plh_bulk_destroy);
        lo->plh_inode = ino;
        lo->plh_lc_cred = get_rpccred(ctx->cred);
1577         lo->plh_flags |= 1 << NFS_LAYOUT_INVALID_STID;
1578         return lo;
1579 }
1580
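/*
 * Find the inode's layout header, allocating one if none exists yet.
 * Called with i_lock held; the lock is dropped around the allocation
 * and re-taken, so nfsi->layout must be re-checked in case another
 * thread won the race and installed a header first.
 */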
1581 static struct pnfs_layout_hdr *
1582 pnfs_find_alloc_layout(struct inode *ino,
1583                        struct nfs_open_context *ctx,
1584                        gfp_t gfp_flags)
1585         __releases(&ino->i_lock)
1586         __acquires(&ino->i_lock)
1587 {
1588         struct nfs_inode *nfsi = NFS_I(ino);
1589         struct pnfs_layout_hdr *new = NULL;
1590
1591         dprintk("%s Begin ino=%p layout=%p\n", __func__, ino, nfsi->layout);
1592
1593         if (nfsi->layout != NULL)
1594                 goto out_existing;
1595         spin_unlock(&ino->i_lock);
1596         new = alloc_init_layout_hdr(ino, ctx, gfp_flags);
1597         spin_lock(&ino->i_lock);
1598
1599         if (likely(nfsi->layout == NULL)) {     /* Won the race? */
1600                 nfsi->layout = new;
1601                 return new;
1602         } else if (new != NULL)
1603                 pnfs_free_layout_hdr(new);
1604 out_existing:
1605         pnfs_get_layout_hdr(nfsi->layout);
1606         return nfsi->layout;
1607 }
1608
1609 /*
1610  * iomode matching rules:
1611  * iomode       lseg    strict  match
1612  *                      iomode
1613  * ------       ----    ------  -----
1614  * ANY          READ    N/A     true
1615  * ANY          RW      N/A     true
1616  * RW           READ    N/A     false
1617  * RW           RW      N/A     true
1618  * READ         READ    N/A     true
1619  * READ         RW      true    false
1620  * READ         RW      false   true
1621  */
1622 static bool
1623 pnfs_lseg_range_match(const struct pnfs_layout_range *ls_range,
1624                  const struct pnfs_layout_range *range,
1625                  bool strict_iomode)
1626 {
1627         struct pnfs_layout_range range1;
1628
1629         if ((range->iomode == IOMODE_RW &&
1630              ls_range->iomode != IOMODE_RW) ||
1631             (range->iomode != ls_range->iomode &&
1632              strict_iomode) ||
1633             !pnfs_lseg_range_intersecting(ls_range, range))
1634                 return false;
1635
1636         /* range1 covers only the first byte in the range */
1637         range1 = *range;
1638         range1.length = 1;
1639         return pnfs_lseg_range_contained(ls_range, &range1);
1640 }
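/*
 * Example (see the table above): a READ request with strict_iomode false
 * can be satisfied by a cached RW lseg that covers the first byte of the
 * range; with strict_iomode true the iomodes must match exactly, so the
 * same RW lseg is rejected.
 */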
1641
1642 /*
1643  * lookup range in layout
1644  */
1645 static struct pnfs_layout_segment *
1646 pnfs_find_lseg(struct pnfs_layout_hdr *lo,
1647                 struct pnfs_layout_range *range,
1648                 bool strict_iomode)
1649 {
1650         struct pnfs_layout_segment *lseg, *ret = NULL;
1651
1652         dprintk("%s:Begin\n", __func__);
1653
1654         list_for_each_entry(lseg, &lo->plh_segs, pls_list) {
1655                 if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags) &&
1656                     pnfs_lseg_range_match(&lseg->pls_range, range,
1657                                           strict_iomode)) {
1658                         ret = pnfs_get_lseg(lseg);
1659                         break;
1660                 }
1661         }
1662
1663         dprintk("%s:Return lseg %p ref %d\n",
1664                 __func__, ret, ret ? refcount_read(&ret->pls_refcount) : 0);
1665         return ret;
1666 }
1667
1668 /*
1669  * Use mdsthreshold hints set at each OPEN to determine if I/O should go
1670  * to the MDS or over pNFS
1671  *
1672  * The nfs_inode read_io and write_io fields are cumulative counters reset
1673  * when there are no layout segments. Note that in pnfs_update_layout iomode
1674  * is set to IOMODE_READ for a READ request, and set to IOMODE_RW for a
1675  * WRITE request.
1676  *
1677  * A return of true means use MDS I/O.
1678  *
1679  * From rfc 5661:
1680  * If a file's size is smaller than the file size threshold, data accesses
1681  * SHOULD be sent to the metadata server.  If an I/O request has a length that
1682  * is below the I/O size threshold, the I/O SHOULD be sent to the metadata
1683  * server.  If both file size and I/O size are provided, the client SHOULD
1684  * reach or exceed both thresholds before sending its read or write
1685  * requests to the data server.
1686  */
1687 static bool pnfs_within_mdsthreshold(struct nfs_open_context *ctx,
1688                                      struct inode *ino, int iomode)
1689 {
1690         struct nfs4_threshold *t = ctx->mdsthreshold;
1691         struct nfs_inode *nfsi = NFS_I(ino);
1692         loff_t fsize = i_size_read(ino);
1693         bool size = false, size_set = false, io = false, io_set = false, ret = false;
1694
1695         if (t == NULL)
1696                 return ret;
1697
1698         dprintk("%s bm=0x%x rd_sz=%llu wr_sz=%llu rd_io=%llu wr_io=%llu\n",
1699                 __func__, t->bm, t->rd_sz, t->wr_sz, t->rd_io_sz, t->wr_io_sz);
1700
1701         switch (iomode) {
1702         case IOMODE_READ:
1703                 if (t->bm & THRESHOLD_RD) {
1704                         dprintk("%s fsize %llu\n", __func__, fsize);
1705                         size_set = true;
1706                         if (fsize < t->rd_sz)
1707                                 size = true;
1708                 }
1709                 if (t->bm & THRESHOLD_RD_IO) {
1710                         dprintk("%s nfsi->read_io %llu\n", __func__,
1711                                 nfsi->read_io);
1712                         io_set = true;
1713                         if (nfsi->read_io < t->rd_io_sz)
1714                                 io = true;
1715                 }
1716                 break;
1717         case IOMODE_RW:
1718                 if (t->bm & THRESHOLD_WR) {
1719                         dprintk("%s fsize %llu\n", __func__, fsize);
1720                         size_set = true;
1721                         if (fsize < t->wr_sz)
1722                                 size = true;
1723                 }
1724                 if (t->bm & THRESHOLD_WR_IO) {
1725                         dprintk("%s nfsi->write_io %llu\n", __func__,
1726                                 nfsi->write_io);
1727                         io_set = true;
1728                         if (nfsi->write_io < t->wr_io_sz)
1729                                 io = true;
1730                 }
1731                 break;
1732         }
1733         if (size_set && io_set) {
1734                 if (size && io)
1735                         ret = true;
1736         } else if (size || io)
1737                 ret = true;
1738
1739         dprintk("<-- %s size %d io %d ret %d\n", __func__, size, io, ret);
1740         return ret;
1741 }
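/*
 * Worked example (illustrative values): with bm = THRESHOLD_RD | THRESHOLD_RD_IO,
 * rd_sz = 1048576 and rd_io_sz = 65536, a READ against a 512 KiB file whose
 * cumulative read_io is 32 KiB is below both thresholds, so the function
 * returns true and the I/O is sent to the MDS.
 */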
1742
1743 static int pnfs_prepare_to_retry_layoutget(struct pnfs_layout_hdr *lo)
1744 {
1745         /*
1746          * Send any pending layoutcommit now: it holds lseg references
1747          * that can hold up the layoutreturn we are about to wait for.
1748          */
1749         pnfs_layoutcommit_inode(lo->plh_inode, false);
1750         return wait_on_bit_action(&lo->plh_flags, NFS_LAYOUT_RETURN,
1751                                    nfs_wait_bit_killable,
1752                                    TASK_KILLABLE);
1753 }
1754
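/*
 * Track the number of outstanding LAYOUTGETs for this layout header;
 * waiters in pnfs_update_layout() are woken once the count drops to zero.
 */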
1755 static void nfs_layoutget_begin(struct pnfs_layout_hdr *lo)
1756 {
1757         atomic_inc(&lo->plh_outstanding);
1758 }
1759
1760 static void nfs_layoutget_end(struct pnfs_layout_hdr *lo)
1761 {
1762         if (atomic_dec_and_test(&lo->plh_outstanding))
1763                 wake_up_var(&lo->plh_outstanding);
1764 }
1765
1766 static void pnfs_clear_first_layoutget(struct pnfs_layout_hdr *lo)
1767 {
1768         unsigned long *bitlock = &lo->plh_flags;
1769
1770         clear_bit_unlock(NFS_LAYOUT_FIRST_LAYOUTGET, bitlock);
1771         smp_mb__after_atomic();
1772         wake_up_bit(bitlock, NFS_LAYOUT_FIRST_LAYOUTGET);
1773 }
1774
1775 static void _add_to_server_list(struct pnfs_layout_hdr *lo,
1776                                 struct nfs_server *server)
1777 {
1778         if (list_empty(&lo->plh_layouts)) {
1779                 struct nfs_client *clp = server->nfs_client;
1780
1781                 /* The lo must be on the clp list if there is any
1782                  * chance of a CB_LAYOUTRECALL(FILE) coming in.
1783                  */
1784                 spin_lock(&clp->cl_lock);
1785                 if (list_empty(&lo->plh_layouts))
1786                         list_add_tail(&lo->plh_layouts, &server->layouts);
1787                 spin_unlock(&clp->cl_lock);
1788         }
1789 }
1790
1791 /*
1792  * The layout segment is retrieved from the server if it is not cached.
1793  * The appropriate layout segment is referenced and returned to the caller.
1794  */
1795 struct pnfs_layout_segment *
1796 pnfs_update_layout(struct inode *ino,
1797                    struct nfs_open_context *ctx,
1798                    loff_t pos,
1799                    u64 count,
1800                    enum pnfs_iomode iomode,
1801                    bool strict_iomode,
1802                    gfp_t gfp_flags)
1803 {
1804         struct pnfs_layout_range arg = {
1805                 .iomode = iomode,
1806                 .offset = pos,
1807                 .length = count,
1808         };
1809         unsigned pg_offset;
1810         struct nfs_server *server = NFS_SERVER(ino);
1811         struct nfs_client *clp = server->nfs_client;
1812         struct pnfs_layout_hdr *lo = NULL;
1813         struct pnfs_layout_segment *lseg = NULL;
1814         struct nfs4_layoutget *lgp;
1815         nfs4_stateid stateid;
1816         long timeout = 0;
1817         unsigned long giveup = jiffies + (clp->cl_lease_time << 1);
1818         bool first;
1819
1820         if (!pnfs_enabled_sb(NFS_SERVER(ino))) {
1821                 trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
1822                                  PNFS_UPDATE_LAYOUT_NO_PNFS);
1823                 goto out;
1824         }
1825
1826         if (pnfs_within_mdsthreshold(ctx, ino, iomode)) {
1827                 trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
1828                                  PNFS_UPDATE_LAYOUT_MDSTHRESH);
1829                 goto out;
1830         }
1831
1832 lookup_again:
1833         lseg = ERR_PTR(nfs4_client_recover_expired_lease(clp));
1834         if (IS_ERR(lseg))
1835                 goto out;
1836         first = false;
1837         spin_lock(&ino->i_lock);
1838         lo = pnfs_find_alloc_layout(ino, ctx, gfp_flags);
1839         if (lo == NULL) {
1840                 spin_unlock(&ino->i_lock);
1841                 trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
1842                                  PNFS_UPDATE_LAYOUT_NOMEM);
1843                 goto out;
1844         }
1845
1846         /* Do we even need to bother with this? */
1847         if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
1848                 trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
1849                                  PNFS_UPDATE_LAYOUT_BULK_RECALL);
1850                 dprintk("%s matches recall, use MDS\n", __func__);
1851                 goto out_unlock;
1852         }
1853
1854         /* if LAYOUTGET already failed once we don't try again */
1855         if (pnfs_layout_io_test_failed(lo, iomode)) {
1856                 trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
1857                                  PNFS_UPDATE_LAYOUT_IO_TEST_FAIL);
1858                 goto out_unlock;
1859         }
1860
1861         /*
1862          * If the layout segment list is empty but there are outstanding
1863          * layoutget calls, wait for them: they may be subject to a layoutrecall.
1864          */
1865         if (list_empty(&lo->plh_segs) &&
1866             atomic_read(&lo->plh_outstanding) != 0) {
1867                 spin_unlock(&ino->i_lock);
1868                 lseg = ERR_PTR(wait_var_event_killable(&lo->plh_outstanding,
1869                                         !atomic_read(&lo->plh_outstanding)));
1870                 if (IS_ERR(lseg) || !list_empty(&lo->plh_segs))
1871                         goto out_put_layout_hdr;
1872                 pnfs_put_layout_hdr(lo);
1873                 goto lookup_again;
1874         }
1875
1876         lseg = pnfs_find_lseg(lo, &arg, strict_iomode);
1877         if (lseg) {
1878                 trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
1879                                 PNFS_UPDATE_LAYOUT_FOUND_CACHED);
1880                 goto out_unlock;
1881         }
1882
1883         if (!nfs4_valid_open_stateid(ctx->state)) {
1884                 trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
1885                                 PNFS_UPDATE_LAYOUT_INVALID_OPEN);
1886                 goto out_unlock;
1887         }
1888
1889         /*
1890          * Choose a stateid for the LAYOUTGET. If we don't have a layout
1891          * stateid, or it has been invalidated, then we must use the open
1892          * stateid.
1893          */
1894         if (test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags)) {
1895
1896                 /*
1897                  * The first layoutget for the file. Need to serialize per
1898                  * RFC 5661 Errata 3208.
1899                  */
1900                 if (test_and_set_bit(NFS_LAYOUT_FIRST_LAYOUTGET,
1901                                      &lo->plh_flags)) {
1902                         spin_unlock(&ino->i_lock);
1903                         lseg = ERR_PTR(wait_on_bit(&lo->plh_flags,
1904                                                 NFS_LAYOUT_FIRST_LAYOUTGET,
1905                                                 TASK_KILLABLE));
1906                         if (IS_ERR(lseg))
1907                                 goto out_put_layout_hdr;
1908                         pnfs_put_layout_hdr(lo);
1909                         dprintk("%s retrying\n", __func__);
1910                         goto lookup_again;
1911                 }
1912
1913                 first = true;
1914                 if (nfs4_select_rw_stateid(ctx->state,
1915                                         iomode == IOMODE_RW ? FMODE_WRITE : FMODE_READ,
1916                                         NULL, &stateid, NULL) != 0) {
1917                         trace_pnfs_update_layout(ino, pos, count,
1918                                         iomode, lo, lseg,
1919                                         PNFS_UPDATE_LAYOUT_INVALID_OPEN);
1920                         goto out_unlock;
1921                 }
1922         } else {
1923                 nfs4_stateid_copy(&stateid, &lo->plh_stateid);
1924         }
1925
1926         /*
1927          * Because we free lsegs before sending LAYOUTRETURN, we need to wait
1928          * for LAYOUTRETURN even if first is true.
1929          */
1930         if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) {
1931                 spin_unlock(&ino->i_lock);
1932                 dprintk("%s wait for layoutreturn\n", __func__);
1933                 lseg = ERR_PTR(pnfs_prepare_to_retry_layoutget(lo));
1934                 if (!IS_ERR(lseg)) {
1935                         if (first)
1936                                 pnfs_clear_first_layoutget(lo);
1937                         pnfs_put_layout_hdr(lo);
1938                         dprintk("%s retrying\n", __func__);
1939                         trace_pnfs_update_layout(ino, pos, count, iomode, lo,
1940                                         lseg, PNFS_UPDATE_LAYOUT_RETRY);
1941                         goto lookup_again;
1942                 }
1943                 trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
1944                                 PNFS_UPDATE_LAYOUT_RETURN);
1945                 goto out_put_layout_hdr;
1946         }
1947
1948         if (pnfs_layoutgets_blocked(lo)) {
1949                 trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
1950                                 PNFS_UPDATE_LAYOUT_BLOCKED);
1951                 goto out_unlock;
1952         }
1953         nfs_layoutget_begin(lo);
1954         spin_unlock(&ino->i_lock);
1955
1956         _add_to_server_list(lo, server);
1957
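        /*
         * Round the requested range out to page boundaries before asking
         * the server. Illustrative example, assuming PAGE_SIZE == 4096:
         * a request for 100 bytes at offset 5000 becomes offset 4096,
         * length 1004, which PAGE_ALIGN() then rounds up to 4096.
         */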
1958         pg_offset = arg.offset & ~PAGE_MASK;
1959         if (pg_offset) {
1960                 arg.offset -= pg_offset;
1961                 arg.length += pg_offset;
1962         }
1963         if (arg.length != NFS4_MAX_UINT64)
1964                 arg.length = PAGE_ALIGN(arg.length);
1965
1966         lgp = pnfs_alloc_init_layoutget_args(ino, ctx, &stateid, &arg, gfp_flags);
1967         if (!lgp) {
1968                 trace_pnfs_update_layout(ino, pos, count, iomode, lo, NULL,
1969                                          PNFS_UPDATE_LAYOUT_NOMEM);
1970                 nfs_layoutget_end(lo);
1971                 goto out_put_layout_hdr;
1972         }
1973
1974         lseg = nfs4_proc_layoutget(lgp, &timeout);
1975         trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
1976                                  PNFS_UPDATE_LAYOUT_SEND_LAYOUTGET);
1977         nfs_layoutget_end(lo);
1978         if (IS_ERR(lseg)) {
1979                 switch(PTR_ERR(lseg)) {
1980                 case -EBUSY:
1981                         if (time_after(jiffies, giveup))
1982                                 lseg = NULL;
1983                         break;
1984                 case -ERECALLCONFLICT:
1985                 case -EAGAIN:
1986                         break;
1987                 default:
1988                         if (!nfs_error_is_fatal(PTR_ERR(lseg))) {
1989                                 pnfs_layout_clear_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode));
1990                                 lseg = NULL;
1991                         }
1992                         goto out_put_layout_hdr;
1993                 }
1994                 if (lseg) {
1995                         if (first)
1996                                 pnfs_clear_first_layoutget(lo);
1997                         trace_pnfs_update_layout(ino, pos, count,
1998                                 iomode, lo, lseg, PNFS_UPDATE_LAYOUT_RETRY);
1999                         pnfs_put_layout_hdr(lo);
2000                         goto lookup_again;
2001                 }
2002         } else {
2003                 pnfs_layout_clear_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode));
2004         }
2005
2006 out_put_layout_hdr:
2007         if (first)
2008                 pnfs_clear_first_layoutget(lo);
2009         pnfs_put_layout_hdr(lo);
2010 out:
2011         dprintk("%s: inode %s/%llu pNFS layout segment %s for "
2012                         "(%s, offset: %llu, length: %llu)\n",
2013                         __func__, ino->i_sb->s_id,
2014                         (unsigned long long)NFS_FILEID(ino),
2015                         IS_ERR_OR_NULL(lseg) ? "not found" : "found",
2016                         iomode == IOMODE_RW ? "read/write" : "read-only",
2017                         (unsigned long long)pos,
2018                         (unsigned long long)count);
2019         return lseg;
2020 out_unlock:
2021         spin_unlock(&ino->i_lock);
2022         goto out_put_layout_hdr;
2023 }
2024 EXPORT_SYMBOL_GPL(pnfs_update_layout);
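/*
 * Usage sketch (illustrative only): callers obtain a referenced lseg for
 * the byte range they are about to issue, e.g.
 *
 *      lseg = pnfs_update_layout(inode, ctx, offset, count,
 *                                IOMODE_READ, false, GFP_KERNEL);
 *      if (IS_ERR(lseg))
 *              ... fall back to I/O through the MDS ...
 *
 * as pnfs_generic_pg_init_read() and pnfs_generic_pg_init_write() below do.
 */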
2025
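/*
 * Sanity-check a layout range returned by the server. Examples of ranges
 * that are rejected (illustrative): iomode IOMODE_ANY, an offset of
 * NFS4_MAX_UINT64, a zero length, or a bounded length that would wrap
 * past NFS4_MAX_UINT64 when added to the offset.
 */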
2026 static bool
2027 pnfs_sanity_check_layout_range(struct pnfs_layout_range *range)
2028 {
2029         switch (range->iomode) {
2030         case IOMODE_READ:
2031         case IOMODE_RW:
2032                 break;
2033         default:
2034                 return false;
2035         }
2036         if (range->offset == NFS4_MAX_UINT64)
2037                 return false;
2038         if (range->length == 0)
2039                 return false;
2040         if (range->length != NFS4_MAX_UINT64 &&
2041             range->length > NFS4_MAX_UINT64 - range->offset)
2042                 return false;
2043         return true;
2044 }
2045
2046 static struct pnfs_layout_hdr *
2047 _pnfs_grab_empty_layout(struct inode *ino, struct nfs_open_context *ctx)
2048 {
2049         struct pnfs_layout_hdr *lo;
2050
2051         spin_lock(&ino->i_lock);
2052         lo = pnfs_find_alloc_layout(ino, ctx, GFP_KERNEL);
2053         if (!lo)
2054                 goto out_unlock;
2055         if (!test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags))
2056                 goto out_unlock;
2057         if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags))
2058                 goto out_unlock;
2059         if (pnfs_layoutgets_blocked(lo))
2060                 goto out_unlock;
2061         if (test_and_set_bit(NFS_LAYOUT_FIRST_LAYOUTGET, &lo->plh_flags))
2062                 goto out_unlock;
2063         nfs_layoutget_begin(lo);
2064         spin_unlock(&ino->i_lock);
2065         _add_to_server_list(lo, NFS_SERVER(ino));
2066         return lo;
2067
2068 out_unlock:
2069         spin_unlock(&ino->i_lock);
2070         pnfs_put_layout_hdr(lo);
2071         return NULL;
2072 }
2073
2074 extern const nfs4_stateid current_stateid;
2075
2076 static void _lgopen_prepare_attached(struct nfs4_opendata *data,
2077                                      struct nfs_open_context *ctx)
2078 {
2079         struct inode *ino = data->dentry->d_inode;
2080         struct pnfs_layout_range rng = {
2081                 .iomode = (data->o_arg.fmode & FMODE_WRITE) ?
2082                           IOMODE_RW: IOMODE_READ,
2083                 .offset = 0,
2084                 .length = NFS4_MAX_UINT64,
2085         };
2086         struct nfs4_layoutget *lgp;
2087         struct pnfs_layout_hdr *lo;
2088
2089         /* Heuristic: skip layoutget if the file is empty or we have cached data */
2090         if (rng.iomode == IOMODE_READ &&
2091            (i_size_read(ino) == 0 || ino->i_mapping->nrpages != 0))
2092                 return;
2093
2094         lo = _pnfs_grab_empty_layout(ino, ctx);
2095         if (!lo)
2096                 return;
2097         lgp = pnfs_alloc_init_layoutget_args(ino, ctx, &current_stateid,
2098                                              &rng, GFP_KERNEL);
2099         if (!lgp) {
2100                 pnfs_clear_first_layoutget(lo);
2101                 pnfs_put_layout_hdr(lo);
2102                 return;
2103         }
2104         data->lgp = lgp;
2105         data->o_arg.lg_args = &lgp->args;
2106         data->o_res.lg_res = &lgp->res;
2107 }
2108
2109 static void _lgopen_prepare_floating(struct nfs4_opendata *data,
2110                                      struct nfs_open_context *ctx)
2111 {
2112         struct pnfs_layout_range rng = {
2113                 .iomode = (data->o_arg.fmode & FMODE_WRITE) ?
2114                           IOMODE_RW: IOMODE_READ,
2115                 .offset = 0,
2116                 .length = NFS4_MAX_UINT64,
2117         };
2118         struct nfs4_layoutget *lgp;
2119
2120         lgp = pnfs_alloc_init_layoutget_args(NULL, ctx, &current_stateid,
2121                                              &rng, GFP_KERNEL);
2122         if (!lgp)
2123                 return;
2124         data->lgp = lgp;
2125         data->o_arg.lg_args = &lgp->args;
2126         data->o_res.lg_res = &lgp->res;
2127 }
2128
2129 void pnfs_lgopen_prepare(struct nfs4_opendata *data,
2130                          struct nfs_open_context *ctx)
2131 {
2132         struct nfs_server *server = NFS_SERVER(data->dir->d_inode);
2133
2134         if (!(pnfs_enabled_sb(server) &&
2135               server->pnfs_curr_ld->flags & PNFS_LAYOUTGET_ON_OPEN))
2136                 return;
2137         /* Could check on max_ops, but currently hardcoded high enough */
2138         if (!nfs_server_capable(data->dir->d_inode, NFS_CAP_LGOPEN))
2139                 return;
2140         if (data->state)
2141                 _lgopen_prepare_attached(data, ctx);
2142         else
2143                 _lgopen_prepare_floating(data, ctx);
2144 }
2145
2146 void pnfs_parse_lgopen(struct inode *ino, struct nfs4_layoutget *lgp,
2147                        struct nfs_open_context *ctx)
2148 {
2149         struct pnfs_layout_hdr *lo;
2150         struct pnfs_layout_segment *lseg;
2151         struct nfs_server *srv = NFS_SERVER(ino);
2152         u32 iomode;
2153
2154         if (!lgp)
2155                 return;
2156         dprintk("%s: entered with status %i\n", __func__, lgp->res.status);
2157         if (lgp->res.status) {
2158                 switch (lgp->res.status) {
2159                 default:
2160                         break;
2161                 /*
2162                  * Halt lgopen attempts if the server doesn't recognise
2163                  * the "current stateid" value, the layout type, or the
2164                  * layoutget operation as being valid.
2165                  * Also if it complains about too many ops in the compound
2166                  * or about the request/reply being too big.
2167                  */
2168                 case -NFS4ERR_BAD_STATEID:
2169                 case -NFS4ERR_NOTSUPP:
2170                 case -NFS4ERR_REP_TOO_BIG:
2171                 case -NFS4ERR_REP_TOO_BIG_TO_CACHE:
2172                 case -NFS4ERR_REQ_TOO_BIG:
2173                 case -NFS4ERR_TOO_MANY_OPS:
2174                 case -NFS4ERR_UNKNOWN_LAYOUTTYPE:
2175                         srv->caps &= ~NFS_CAP_LGOPEN;
2176                 }
2177                 return;
2178         }
2179         if (!lgp->args.inode) {
2180                 lo = _pnfs_grab_empty_layout(ino, ctx);
2181                 if (!lo)
2182                         return;
2183                 lgp->args.inode = ino;
2184         } else
2185                 lo = NFS_I(lgp->args.inode)->layout;
2186
2187         lseg = pnfs_layout_process(lgp);
2188         if (!IS_ERR(lseg)) {
2189                 iomode = lgp->args.range.iomode;
2190                 pnfs_layout_clear_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode));
2191                 pnfs_put_lseg(lseg);
2192         }
2193 }
2194
2195 void nfs4_lgopen_release(struct nfs4_layoutget *lgp)
2196 {
2197         if (lgp != NULL) {
2198                 struct inode *inode = lgp->args.inode;
2199                 if (inode) {
2200                         struct pnfs_layout_hdr *lo = NFS_I(inode)->layout;
2201                         pnfs_clear_first_layoutget(lo);
2202                         nfs_layoutget_end(lo);
2203                 }
2204                 pnfs_layoutget_free(lgp);
2205         }
2206 }
2207
2208 struct pnfs_layout_segment *
2209 pnfs_layout_process(struct nfs4_layoutget *lgp)
2210 {
2211         struct pnfs_layout_hdr *lo = NFS_I(lgp->args.inode)->layout;
2212         struct nfs4_layoutget_res *res = &lgp->res;
2213         struct pnfs_layout_segment *lseg;
2214         struct inode *ino = lo->plh_inode;
2215         LIST_HEAD(free_me);
2216
2217         if (!pnfs_sanity_check_layout_range(&res->range))
2218                 return ERR_PTR(-EINVAL);
2219
2220         /* Inject layout blob into I/O device driver */
2221         lseg = NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res, lgp->gfp_flags);
2222         if (IS_ERR_OR_NULL(lseg)) {
2223                 if (!lseg)
2224                         lseg = ERR_PTR(-ENOMEM);
2225
2226                 dprintk("%s: Could not allocate layout: error %ld\n",
2227                        __func__, PTR_ERR(lseg));
2228                 return lseg;
2229         }
2230
2231         pnfs_init_lseg(lo, lseg, &res->range, &res->stateid);
2232
2233         spin_lock(&ino->i_lock);
2234         if (pnfs_layoutgets_blocked(lo)) {
2235                 dprintk("%s forget reply due to state\n", __func__);
2236                 goto out_forget;
2237         }
2238
2239         if (!pnfs_layout_is_valid(lo)) {
2240                 /* We have a completely new layout */
2241                 pnfs_set_layout_stateid(lo, &res->stateid, true);
2242         } else if (nfs4_stateid_match_other(&lo->plh_stateid, &res->stateid)) {
2243                 /* existing state ID, make sure the sequence number matches. */
2244                 if (pnfs_layout_stateid_blocked(lo, &res->stateid)) {
2245                         dprintk("%s forget reply due to sequence\n", __func__);
2246                         goto out_forget;
2247                 }
2248                 pnfs_set_layout_stateid(lo, &res->stateid, false);
2249         } else {
2250                 /*
2251                  * We got an entirely new state ID.  Mark all segments for the
2252                  * inode invalid, and retry the layoutget
2253                  */
2254                 pnfs_mark_layout_stateid_invalid(lo, &free_me);
2255                 goto out_forget;
2256         }
2257
2258         pnfs_get_lseg(lseg);
2259         pnfs_layout_insert_lseg(lo, lseg, &free_me);
2260
2261
2263                 set_bit(NFS_LSEG_ROC, &lseg->pls_flags);
2264
2265         spin_unlock(&ino->i_lock);
2266         pnfs_free_lseg_list(&free_me);
2267         return lseg;
2268
2269 out_forget:
2270         spin_unlock(&ino->i_lock);
2271         lseg->pls_layout = lo;
2272         NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
2273         return ERR_PTR(-EAGAIN);
2274 }
2275
2276 static int
2277 mark_lseg_invalid_or_return(struct pnfs_layout_segment *lseg,
2278                 struct list_head *tmp_list)
2279 {
2280         if (!mark_lseg_invalid(lseg, tmp_list))
2281                 return 0;
2282         pnfs_cache_lseg_for_layoutreturn(lseg->pls_layout, lseg);
2283         return 1;
2284 }
2285
2286 /**
2287  * pnfs_mark_matching_lsegs_return - Free or return matching layout segments
2288  * @lo: pointer to layout header
2289  * @tmp_list: list header to be used with pnfs_free_lseg_list()
2290  * @return_range: describes the layout segment ranges to be returned
2291  * @seq: stateid seqid to match
2292  *
2293  * This function is mainly intended for use by layoutrecall. It attempts
2294  * to free the layout segment immediately, or else to mark it for return
2295  * as soon as its reference count drops to zero.
2296  *
2297  * Returns:
2298  * - 0: a layoutreturn needs to be scheduled.
2299  * - -EBUSY: there are layout segments that are still in use.
2300  * - -ENOENT: there are no layout segments that need to be returned.
2301  */
2302 int
2303 pnfs_mark_matching_lsegs_return(struct pnfs_layout_hdr *lo,
2304                                 struct list_head *tmp_list,
2305                                 const struct pnfs_layout_range *return_range,
2306                                 u32 seq)
2307 {
2308         struct pnfs_layout_segment *lseg, *next;
2309         int remaining = 0;
2310
2311         dprintk("%s:Begin lo %p\n", __func__, lo);
2312
2313         assert_spin_locked(&lo->plh_inode->i_lock);
2314
2315         list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
2316                 if (pnfs_match_lseg_recall(lseg, return_range, seq)) {
2317                         dprintk("%s: marking lseg %p iomode %d "
2318                                 "offset %llu length %llu\n", __func__,
2319                                 lseg, lseg->pls_range.iomode,
2320                                 lseg->pls_range.offset,
2321                                 lseg->pls_range.length);
2322                         if (mark_lseg_invalid_or_return(lseg, tmp_list))
2323                                 continue;
2324                         remaining++;
2325                         set_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags);
2326                 }
2327
2328         if (remaining) {
2329                 pnfs_set_plh_return_info(lo, return_range->iomode, seq);
2330                 return -EBUSY;
2331         }
2332
2333         if (!list_empty(&lo->plh_return_segs)) {
2334                 pnfs_set_plh_return_info(lo, return_range->iomode, seq);
2335                 return 0;
2336         }
2337
2338         return -ENOENT;
2339 }
2340
2341 void pnfs_error_mark_layout_for_return(struct inode *inode,
2342                                        struct pnfs_layout_segment *lseg)
2343 {
2344         struct pnfs_layout_hdr *lo = NFS_I(inode)->layout;
2345         struct pnfs_layout_range range = {
2346                 .iomode = lseg->pls_range.iomode,
2347                 .offset = 0,
2348                 .length = NFS4_MAX_UINT64,
2349         };
2350         bool return_now = false;
2351
2352         spin_lock(&inode->i_lock);
2353         if (!pnfs_layout_is_valid(lo)) {
2354                 spin_unlock(&inode->i_lock);
2355                 return;
2356         }
2357         pnfs_set_plh_return_info(lo, range.iomode, 0);
2358         /*
2359          * mark all matching lsegs so that we are sure to have no live
2360          * segments at hand when sending layoutreturn. See pnfs_put_lseg()
2361          * for how it works.
2362          */
2363         if (pnfs_mark_matching_lsegs_return(lo, &lo->plh_return_segs, &range, 0) != -EBUSY) {
2364                 nfs4_stateid stateid;
2365                 enum pnfs_iomode iomode;
2366
2367                 return_now = pnfs_prepare_layoutreturn(lo, &stateid, &iomode);
2368                 spin_unlock(&inode->i_lock);
2369                 if (return_now)
2370                         pnfs_send_layoutreturn(lo, &stateid, iomode, false);
2371         } else {
2372                 spin_unlock(&inode->i_lock);
2373                 nfs_commit_inode(inode, 0);
2374         }
2375 }
2376 EXPORT_SYMBOL_GPL(pnfs_error_mark_layout_for_return);
2377
2378 void
2379 pnfs_generic_pg_check_layout(struct nfs_pageio_descriptor *pgio)
2380 {
2381         if (pgio->pg_lseg == NULL ||
2382             test_bit(NFS_LSEG_VALID, &pgio->pg_lseg->pls_flags))
2383                 return;
2384         pnfs_put_lseg(pgio->pg_lseg);
2385         pgio->pg_lseg = NULL;
2386 }
2387 EXPORT_SYMBOL_GPL(pnfs_generic_pg_check_layout);
2388
2389 /*
2390  * Check for any intersection between the request and the pgio->pg_lseg,
2391  * and if none, put this pgio->pg_lseg away.
2392  */
2393 static void
2394 pnfs_generic_pg_check_range(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
2395 {
2396         if (pgio->pg_lseg && !pnfs_lseg_request_intersecting(pgio->pg_lseg, req)) {
2397                 pnfs_put_lseg(pgio->pg_lseg);
2398                 pgio->pg_lseg = NULL;
2399         }
2400 }
2401
2402 void
2403 pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
2404 {
2405         u64 rd_size = req->wb_bytes;
2406
2407         pnfs_generic_pg_check_layout(pgio);
2408         pnfs_generic_pg_check_range(pgio, req);
2409         if (pgio->pg_lseg == NULL) {
2410                 if (pgio->pg_dreq == NULL)
2411                         rd_size = i_size_read(pgio->pg_inode) - req_offset(req);
2412                 else
2413                         rd_size = nfs_dreq_bytes_left(pgio->pg_dreq);
2414
2415                 pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
2416                                                    req->wb_context,
2417                                                    req_offset(req),
2418                                                    rd_size,
2419                                                    IOMODE_READ,
2420                                                    false,
2421                                                    GFP_KERNEL);
2422                 if (IS_ERR(pgio->pg_lseg)) {
2423                         pgio->pg_error = PTR_ERR(pgio->pg_lseg);
2424                         pgio->pg_lseg = NULL;
2425                         return;
2426                 }
2427         }
2428         /* If no lseg, fall back to read through mds */
2429         if (pgio->pg_lseg == NULL)
2430                 nfs_pageio_reset_read_mds(pgio);
2431
2432 }
2433 EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_read);
2434
2435 void
2436 pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *pgio,
2437                            struct nfs_page *req, u64 wb_size)
2438 {
2439         pnfs_generic_pg_check_layout(pgio);
2440         pnfs_generic_pg_check_range(pgio, req);
2441         if (pgio->pg_lseg == NULL) {
2442                 pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
2443                                                    req->wb_context,
2444                                                    req_offset(req),
2445                                                    wb_size,
2446                                                    IOMODE_RW,
2447                                                    false,
2448                                                    GFP_NOFS);
2449                 if (IS_ERR(pgio->pg_lseg)) {
2450                         pgio->pg_error = PTR_ERR(pgio->pg_lseg);
2451                         pgio->pg_lseg = NULL;
2452                         return;
2453                 }
2454         }
2455         /* If no lseg, fall back to write through mds */
2456         if (pgio->pg_lseg == NULL)
2457                 nfs_pageio_reset_write_mds(pgio);
2458 }
2459 EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_write);
2460
2461 void
2462 pnfs_generic_pg_cleanup(struct nfs_pageio_descriptor *desc)
2463 {
2464         if (desc->pg_lseg) {
2465                 pnfs_put_lseg(desc->pg_lseg);
2466                 desc->pg_lseg = NULL;
2467         }
2468 }
2469 EXPORT_SYMBOL_GPL(pnfs_generic_pg_cleanup);
2470
2471 /*
2472  * Return 0 if @req cannot be coalesced into @pgio, otherwise return the number
2473  * of bytes (maximum @req->wb_bytes) that can be coalesced.
2474  */
2475 size_t
2476 pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio,
2477                      struct nfs_page *prev, struct nfs_page *req)
2478 {
2479         unsigned int size;
2480         u64 seg_end, req_start, seg_left;
2481
2482         size = nfs_generic_pg_test(pgio, prev, req);
2483         if (!size)
2484                 return 0;
2485
2486         /*
2487          * 'size' contains the number of bytes left in the current page (up
2488          * to the original size asked for in @req->wb_bytes).
2489          *
2490          * Calculate how many bytes are left in the layout segment
2491          * and if there are less bytes than 'size', return that instead.
2492          *
2493  * Please also note that 'seg_end' is actually the offset of the
2494          * first byte that lies outside the pnfs_layout_range. FIXME?
2495          *
2496          */
2497         if (pgio->pg_lseg) {
2498                 seg_end = pnfs_end_offset(pgio->pg_lseg->pls_range.offset,
2499                                      pgio->pg_lseg->pls_range.length);
2500                 req_start = req_offset(req);
2501
2502                 /* start of request is past the last byte of this segment */
2503                 if (req_start >= seg_end)
2504                         return 0;
2505
2506                 /* adjust 'size' iff there are fewer bytes left in the
2507                  * segment than what nfs_generic_pg_test returned */
2508                 seg_left = seg_end - req_start;
2509                 if (seg_left < size)
2510                         size = (unsigned int)seg_left;
2511         }
2512
2513         return size;
2514 }
2515 EXPORT_SYMBOL_GPL(pnfs_generic_pg_test);
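/*
 * Worked example (illustrative): with an lseg covering offset 0, length
 * 8192, a request starting at offset 4096 for which nfs_generic_pg_test()
 * allowed 8192 bytes yields seg_end = 8192 and seg_left = 4096, so only
 * 4096 bytes may be coalesced.
 */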
2516
2517 int pnfs_write_done_resend_to_mds(struct nfs_pgio_header *hdr)
2518 {
2519         struct nfs_pageio_descriptor pgio;
2520
2521         /* Resend all requests through the MDS */
2522         nfs_pageio_init_write(&pgio, hdr->inode, FLUSH_STABLE, true,
2523                               hdr->completion_ops);
2524         set_bit(NFS_CONTEXT_RESEND_WRITES, &hdr->args.context->flags);
2525         return nfs_pageio_resend(&pgio, hdr);
2526 }
2527 EXPORT_SYMBOL_GPL(pnfs_write_done_resend_to_mds);
2528
2529 static void pnfs_ld_handle_write_error(struct nfs_pgio_header *hdr)
2530 {
2532         dprintk("pnfs write error = %d\n", hdr->pnfs_error);
2533         if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
2534             PNFS_LAYOUTRET_ON_ERROR) {
2535                 pnfs_return_layout(hdr->inode);
2536         }
2537         if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
2538                 hdr->task.tk_status = pnfs_write_done_resend_to_mds(hdr);
2539 }
2540
2541 /*
2542  * Called by non rpc-based layout drivers
2543  */
2544 void pnfs_ld_write_done(struct nfs_pgio_header *hdr)
2545 {
2546         if (likely(!hdr->pnfs_error)) {
2547                 pnfs_set_layoutcommit(hdr->inode, hdr->lseg,
2548                                 hdr->mds_offset + hdr->res.count);
2549                 hdr->mds_ops->rpc_call_done(&hdr->task, hdr);
2550         }
2551         trace_nfs4_pnfs_write(hdr, hdr->pnfs_error);
2552         if (unlikely(hdr->pnfs_error))
2553                 pnfs_ld_handle_write_error(hdr);
2554         hdr->mds_ops->rpc_release(hdr);
2555 }
2556 EXPORT_SYMBOL_GPL(pnfs_ld_write_done);
2557
2558 static void
2559 pnfs_write_through_mds(struct nfs_pageio_descriptor *desc,
2560                 struct nfs_pgio_header *hdr)
2561 {
2562         struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
2563
2564         if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
2565                 list_splice_tail_init(&hdr->pages, &mirror->pg_list);
2566                 nfs_pageio_reset_write_mds(desc);
2567                 mirror->pg_recoalesce = 1;
2568         }
2569         hdr->completion_ops->completion(hdr);
2570 }
2571
2572 static enum pnfs_try_status
2573 pnfs_try_to_write_data(struct nfs_pgio_header *hdr,
2574                         const struct rpc_call_ops *call_ops,
2575                         struct pnfs_layout_segment *lseg,
2576                         int how)
2577 {
2578         struct inode *inode = hdr->inode;
2579         enum pnfs_try_status trypnfs;
2580         struct nfs_server *nfss = NFS_SERVER(inode);
2581
2582         hdr->mds_ops = call_ops;
2583
2584         dprintk("%s: Writing ino:%lu %u@%llu (how %d)\n", __func__,
2585                 inode->i_ino, hdr->args.count, hdr->args.offset, how);
2586         trypnfs = nfss->pnfs_curr_ld->write_pagelist(hdr, how);
2587         if (trypnfs != PNFS_NOT_ATTEMPTED)
2588                 nfs_inc_stats(inode, NFSIOS_PNFS_WRITE);
2589         dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
2590         return trypnfs;
2591 }
2592
2593 static void
2594 pnfs_do_write(struct nfs_pageio_descriptor *desc,
2595               struct nfs_pgio_header *hdr, int how)
2596 {
2597         const struct rpc_call_ops *call_ops = desc->pg_rpc_callops;
2598         struct pnfs_layout_segment *lseg = desc->pg_lseg;
2599         enum pnfs_try_status trypnfs;
2600
2601         trypnfs = pnfs_try_to_write_data(hdr, call_ops, lseg, how);
2602         switch (trypnfs) {
2603         case PNFS_NOT_ATTEMPTED:
2604                 pnfs_write_through_mds(desc, hdr);
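                /* fall through */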
2605         case PNFS_ATTEMPTED:
2606                 break;
2607         case PNFS_TRY_AGAIN:
2608                 /* cleanup hdr and prepare to redo pnfs */
2609                 if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
2610                         struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
2611                         list_splice_init(&hdr->pages, &mirror->pg_list);
2612                         mirror->pg_recoalesce = 1;
2613                 }
2614                 hdr->mds_ops->rpc_release(hdr);
2615         }
2616 }
2617
2618 static void pnfs_writehdr_free(struct nfs_pgio_header *hdr)
2619 {
2620         pnfs_put_lseg(hdr->lseg);
2621         nfs_pgio_header_free(hdr);
2622 }
2623
2624 int
2625 pnfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc)
2626 {
2627         struct nfs_pgio_header *hdr;
2628         int ret;
2629
2630         hdr = nfs_pgio_header_alloc(desc->pg_rw_ops);
2631         if (!hdr) {
2632                 desc->pg_error = -ENOMEM;
2633                 return desc->pg_error;
2634         }
2635         nfs_pgheader_init(desc, hdr, pnfs_writehdr_free);
2636
2637         hdr->lseg = pnfs_get_lseg(desc->pg_lseg);
2638         ret = nfs_generic_pgio(desc, hdr);
2639         if (!ret)
2640                 pnfs_do_write(desc, hdr, desc->pg_ioflags);
2641
2642         return ret;
2643 }
2644 EXPORT_SYMBOL_GPL(pnfs_generic_pg_writepages);
2645
2646 int pnfs_read_done_resend_to_mds(struct nfs_pgio_header *hdr)
2647 {
2648         struct nfs_pageio_descriptor pgio;
2649
2650         /* Resend all requests through the MDS */
2651         nfs_pageio_init_read(&pgio, hdr->inode, true, hdr->completion_ops);
2652         return nfs_pageio_resend(&pgio, hdr);
2653 }
2654 EXPORT_SYMBOL_GPL(pnfs_read_done_resend_to_mds);
2655
2656 static void pnfs_ld_handle_read_error(struct nfs_pgio_header *hdr)
2657 {
2658         dprintk("pnfs read error = %d\n", hdr->pnfs_error);
2659         if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
2660             PNFS_LAYOUTRET_ON_ERROR) {
2661                 pnfs_return_layout(hdr->inode);
2662         }
2663         if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
2664                 hdr->task.tk_status = pnfs_read_done_resend_to_mds(hdr);
2665 }
2666
2667 /*
2668  * Called by non rpc-based layout drivers
2669  */
2670 void pnfs_ld_read_done(struct nfs_pgio_header *hdr)
2671 {
2672         if (likely(!hdr->pnfs_error))
2673                 hdr->mds_ops->rpc_call_done(&hdr->task, hdr);
2674         trace_nfs4_pnfs_read(hdr, hdr->pnfs_error);
2675         if (unlikely(hdr->pnfs_error))
2676                 pnfs_ld_handle_read_error(hdr);
2677         hdr->mds_ops->rpc_release(hdr);
2678 }
2679 EXPORT_SYMBOL_GPL(pnfs_ld_read_done);
2680
2681 static void
2682 pnfs_read_through_mds(struct nfs_pageio_descriptor *desc,
2683                 struct nfs_pgio_header *hdr)
2684 {
2685         struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
2686
2687         if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
2688                 list_splice_tail_init(&hdr->pages, &mirror->pg_list);
2689                 nfs_pageio_reset_read_mds(desc);
2690                 mirror->pg_recoalesce = 1;
2691         }
2692         hdr->completion_ops->completion(hdr);
2693 }
2694
2695 /*
2696  * Call the appropriate parallel I/O subsystem read function.
2697  */
2698 static enum pnfs_try_status
2699 pnfs_try_to_read_data(struct nfs_pgio_header *hdr,
2700                        const struct rpc_call_ops *call_ops,
2701                        struct pnfs_layout_segment *lseg)
2702 {
2703         struct inode *inode = hdr->inode;
2704         struct nfs_server *nfss = NFS_SERVER(inode);
2705         enum pnfs_try_status trypnfs;
2706
2707         hdr->mds_ops = call_ops;
2708
2709         dprintk("%s: Reading ino:%lu %u@%llu\n",
2710                 __func__, inode->i_ino, hdr->args.count, hdr->args.offset);
2711
2712         trypnfs = nfss->pnfs_curr_ld->read_pagelist(hdr);
2713         if (trypnfs != PNFS_NOT_ATTEMPTED)
2714                 nfs_inc_stats(inode, NFSIOS_PNFS_READ);
2715         dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
2716         return trypnfs;
2717 }
2718
2719 /* Resend all requests through pnfs. */
2720 void pnfs_read_resend_pnfs(struct nfs_pgio_header *hdr)
2721 {
2722         struct nfs_pageio_descriptor pgio;
2723
2724         if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
2725                 /* Prevent deadlocks with layoutreturn! */
2726                 pnfs_put_lseg(hdr->lseg);
2727                 hdr->lseg = NULL;
2728
2729                 nfs_pageio_init_read(&pgio, hdr->inode, false,
2730                                         hdr->completion_ops);
2731                 hdr->task.tk_status = nfs_pageio_resend(&pgio, hdr);
2732         }
2733 }
2734 EXPORT_SYMBOL_GPL(pnfs_read_resend_pnfs);
2735
2736 static void
2737 pnfs_do_read(struct nfs_pageio_descriptor *desc, struct nfs_pgio_header *hdr)
2738 {
2739         const struct rpc_call_ops *call_ops = desc->pg_rpc_callops;
2740         struct pnfs_layout_segment *lseg = desc->pg_lseg;
2741         enum pnfs_try_status trypnfs;
2742
2743         trypnfs = pnfs_try_to_read_data(hdr, call_ops, lseg);
2744         switch (trypnfs) {
2745         case PNFS_NOT_ATTEMPTED:
2746                 pnfs_read_through_mds(desc, hdr);
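                /* fall through */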
2747         case PNFS_ATTEMPTED:
2748                 break;
2749         case PNFS_TRY_AGAIN:
2750                 /* cleanup hdr and prepare to redo pnfs */
2751                 if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
2752                         struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
2753                         list_splice_init(&hdr->pages, &mirror->pg_list);
2754                         mirror->pg_recoalesce = 1;
2755                 }
2756                 hdr->mds_ops->rpc_release(hdr);
2757         }
2758 }
2759
2760 static void pnfs_readhdr_free(struct nfs_pgio_header *hdr)
2761 {
2762         pnfs_put_lseg(hdr->lseg);
2763         nfs_pgio_header_free(hdr);
2764 }
2765
2766 int
2767 pnfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc)
2768 {
2769         struct nfs_pgio_header *hdr;
2770         int ret;
2771
2772         hdr = nfs_pgio_header_alloc(desc->pg_rw_ops);
2773         if (!hdr) {
2774                 desc->pg_error = -ENOMEM;
2775                 return desc->pg_error;
2776         }
2777         nfs_pgheader_init(desc, hdr, pnfs_readhdr_free);
2778         hdr->lseg = pnfs_get_lseg(desc->pg_lseg);
2779         ret = nfs_generic_pgio(desc, hdr);
2780         if (!ret)
2781                 pnfs_do_read(desc, hdr);
2782         return ret;
2783 }
2784 EXPORT_SYMBOL_GPL(pnfs_generic_pg_readpages);
2785
2786 static void pnfs_clear_layoutcommitting(struct inode *inode)
2787 {
2788         unsigned long *bitlock = &NFS_I(inode)->flags;
2789
2790         clear_bit_unlock(NFS_INO_LAYOUTCOMMITTING, bitlock);
2791         smp_mb__after_atomic();
2792         wake_up_bit(bitlock, NFS_INO_LAYOUTCOMMITTING);
2793 }
2794
2795 /*
2796  * There can be multiple RW segments.
2797  */
2798 static void pnfs_list_write_lseg(struct inode *inode, struct list_head *listp)
2799 {
2800         struct pnfs_layout_segment *lseg;
2801
2802         list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list) {
2803                 if (lseg->pls_range.iomode == IOMODE_RW &&
2804                     test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
2805                         list_add(&lseg->pls_lc_list, listp);
2806         }
2807 }
2808
2809 static void pnfs_list_write_lseg_done(struct inode *inode, struct list_head *listp)
2810 {
2811         struct pnfs_layout_segment *lseg, *tmp;
2812
2813         /* Matched by references in pnfs_set_layoutcommit */
2814         list_for_each_entry_safe(lseg, tmp, listp, pls_lc_list) {
2815                 list_del_init(&lseg->pls_lc_list);
2816                 pnfs_put_lseg(lseg);
2817         }
2818
2819         pnfs_clear_layoutcommitting(inode);
2820 }
2821
2822 void pnfs_set_lo_fail(struct pnfs_layout_segment *lseg)
2823 {
2824         pnfs_layout_io_set_failed(lseg->pls_layout, lseg->pls_range.iomode);
2825 }
2826 EXPORT_SYMBOL_GPL(pnfs_set_lo_fail);
2827
2828 void
2829 pnfs_set_layoutcommit(struct inode *inode, struct pnfs_layout_segment *lseg,
2830                 loff_t end_pos)
2831 {
2832         struct nfs_inode *nfsi = NFS_I(inode);
2833         bool mark_as_dirty = false;
2834
2835         spin_lock(&inode->i_lock);
2836         if (!test_and_set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
2837                 nfsi->layout->plh_lwb = end_pos;
2838                 mark_as_dirty = true;
2839                 dprintk("%s: Set layoutcommit for inode %lu ",
2840                         __func__, inode->i_ino);
2841         } else if (end_pos > nfsi->layout->plh_lwb)
2842                 nfsi->layout->plh_lwb = end_pos;
2843         if (!test_and_set_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags)) {
2844                 /* references matched in nfs4_layoutcommit_release */
2845                 pnfs_get_lseg(lseg);
2846         }
2847         spin_unlock(&inode->i_lock);
2848         dprintk("%s: lseg %p end_pos %llu\n",
2849                 __func__, lseg, nfsi->layout->plh_lwb);
2850
2851         /* If pnfs_layoutcommit_inode() runs after the i_lock is dropped, the
2852          * next layoutcommit is a no-op: NFS_INO_LAYOUTCOMMIT is already clear. */
2853         if (mark_as_dirty)
2854                 mark_inode_dirty_sync(inode);
2855 }
2856 EXPORT_SYMBOL_GPL(pnfs_set_layoutcommit);
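/*
 * Example (illustrative): two RW writes ending at byte offsets 4096 and
 * 8192 leave plh_lwb at 8192, so the eventual LAYOUTCOMMIT reports
 * lastbytewritten = 8191 (see pnfs_layoutcommit_inode() below).
 */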
2857
2858 void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data)
2859 {
2860         struct nfs_server *nfss = NFS_SERVER(data->args.inode);
2861
2862         if (nfss->pnfs_curr_ld->cleanup_layoutcommit)
2863                 nfss->pnfs_curr_ld->cleanup_layoutcommit(data);
2864         pnfs_list_write_lseg_done(data->args.inode, &data->lseg_list);
2865 }
2866
2867 /*
2868  * For the LAYOUT4_NFSV4_1_FILES layout type, NFS_DATA_SYNC WRITEs and
2869  * NFS_UNSTABLE WRITEs with a COMMIT to data servers must store enough
2870  * data to disk to allow the server to recover the data if it crashes.
2871  * LAYOUTCOMMIT is only needed when the NFL4_UFLG_COMMIT_THRU_MDS flag
2872  * is off, and a COMMIT is sent to a data server, or
2873  * if WRITEs to a data server return NFS_DATA_SYNC.
2874  */
int
pnfs_layoutcommit_inode(struct inode *inode, bool sync)
{
        struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;
        struct nfs4_layoutcommit_data *data;
        struct nfs_inode *nfsi = NFS_I(inode);
        loff_t end_pos;
        int status;

        if (!pnfs_layoutcommit_outstanding(inode))
                return 0;

        dprintk("--> %s inode %lu\n", __func__, inode->i_ino);

        status = -EAGAIN;
        if (test_and_set_bit(NFS_INO_LAYOUTCOMMITTING, &nfsi->flags)) {
                if (!sync)
                        goto out;
                status = wait_on_bit_lock_action(&nfsi->flags,
                                NFS_INO_LAYOUTCOMMITTING,
                                nfs_wait_bit_killable,
                                TASK_KILLABLE);
                if (status)
                        goto out;
        }

        status = -ENOMEM;
        /* Note kzalloc ensures data->res.seq_res.sr_slot == NULL */
        data = kzalloc(sizeof(*data), GFP_NOFS);
        if (!data)
                goto clear_layoutcommitting;

        status = 0;
        spin_lock(&inode->i_lock);
        if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
                goto out_unlock;

        INIT_LIST_HEAD(&data->lseg_list);
        pnfs_list_write_lseg(inode, &data->lseg_list);

        end_pos = nfsi->layout->plh_lwb;

        nfs4_stateid_copy(&data->args.stateid, &nfsi->layout->plh_stateid);
        spin_unlock(&inode->i_lock);

        data->args.inode = inode;
        data->cred = get_rpccred(nfsi->layout->plh_lc_cred);
        nfs_fattr_init(&data->fattr);
        data->args.bitmask = NFS_SERVER(inode)->cache_consistency_bitmask;
        data->res.fattr = &data->fattr;
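        /*
         * plh_lwb stores the offset just past the last byte written,
         * whereas LAYOUTCOMMIT takes the offset of the last byte
         * itself; an end_pos of zero maps to U64_MAX.
         */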
        if (end_pos != 0)
                data->args.lastbytewritten = end_pos - 1;
        else
                data->args.lastbytewritten = U64_MAX;
        data->res.server = NFS_SERVER(inode);

        if (ld->prepare_layoutcommit) {
                status = ld->prepare_layoutcommit(&data->args);
                if (status) {
                        put_rpccred(data->cred);
                        spin_lock(&inode->i_lock);
                        set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags);
                        if (end_pos > nfsi->layout->plh_lwb)
                                nfsi->layout->plh_lwb = end_pos;
                        goto out_unlock;
                }
        }

        status = nfs4_proc_layoutcommit(data, sync);
out:
        if (status)
                mark_inode_dirty_sync(inode);
        dprintk("<-- %s status %d\n", __func__, status);
        return status;
out_unlock:
        spin_unlock(&inode->i_lock);
        kfree(data);
clear_layoutcommitting:
        pnfs_clear_layoutcommitting(inode);
        goto out;
}
EXPORT_SYMBOL_GPL(pnfs_layoutcommit_inode);

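/*
 * Illustrative call site (a sketch only, not code from this file): a
 * sync path would typically issue
 *
 *      error = pnfs_layoutcommit_inode(inode, true);
 *
 * where a nonzero return leaves the inode marked dirty, so the commit
 * is retried on a later sync.
 */

/*
 * Generic implementation of the layout driver ->sync operation: always
 * issue a synchronous layoutcommit; @datasync is ignored.
 */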
int
pnfs_generic_sync(struct inode *inode, bool datasync)
{
        return pnfs_layoutcommit_inode(inode, true);
}
EXPORT_SYMBOL_GPL(pnfs_generic_sync);

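/*
 * Allocate a zeroed nfs4_threshold, used to hold the server's
 * mdsthreshold hints; returns NULL on allocation failure.
 */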
struct nfs4_threshold *pnfs_mdsthreshold_alloc(void)
{
        struct nfs4_threshold *thp;

        thp = kzalloc(sizeof(*thp), GFP_NOFS);
        if (!thp) {
                dprintk("%s mdsthreshold allocation failed\n", __func__);
                return NULL;
        }
        return thp;
}

#if IS_ENABLED(CONFIG_NFS_V4_2)
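/*
 * Send a LAYOUTSTATS report for @inode, unless the server lacks
 * support or a report is already in flight (NFS_INO_LAYOUTSTATS
 * serializes senders).  The error paths below release the layout
 * header reference and clear the flag; once the call is handed to
 * nfs42_proc_layoutstats_generic(), the RPC release path owns both.
 */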
int
pnfs_report_layoutstat(struct inode *inode, gfp_t gfp_flags)
{
        struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;
        struct nfs_server *server = NFS_SERVER(inode);
        struct nfs_inode *nfsi = NFS_I(inode);
        struct nfs42_layoutstat_data *data;
        struct pnfs_layout_hdr *hdr;
        int status = 0;

        if (!pnfs_enabled_sb(server) || !ld->prepare_layoutstats)
                goto out;

        if (!nfs_server_capable(inode, NFS_CAP_LAYOUTSTATS))
                goto out;

        if (test_and_set_bit(NFS_INO_LAYOUTSTATS, &nfsi->flags))
                goto out;

        spin_lock(&inode->i_lock);
        if (!nfsi->layout) {
                spin_unlock(&inode->i_lock);
                goto out_clear_layoutstats;
        }
        hdr = nfsi->layout;
        pnfs_get_layout_hdr(hdr);
        spin_unlock(&inode->i_lock);

        data = kzalloc(sizeof(*data), gfp_flags);
        if (!data) {
                status = -ENOMEM;
                goto out_put;
        }

        data->args.fh = NFS_FH(inode);
        data->args.inode = inode;
        status = ld->prepare_layoutstats(&data->args);
        if (status)
                goto out_free;

        status = nfs42_proc_layoutstats_generic(NFS_SERVER(inode), data);

out:
        dprintk("%s returns %d\n", __func__, status);
        return status;

out_free:
        kfree(data);
out_put:
        pnfs_put_layout_hdr(hdr);
out_clear_layoutstats:
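        /*
         * Order the cleanup above against the clear_bit() so that a
         * new report cannot start until the old one is fully torn
         * down.
         */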
        smp_mb__before_atomic();
        clear_bit(NFS_INO_LAYOUTSTATS, &nfsi->flags);
        smp_mb__after_atomic();
        goto out;
}
EXPORT_SYMBOL_GPL(pnfs_report_layoutstat);
#endif

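/*
 * Interval, in seconds, at which layout drivers schedule periodic
 * layoutstats reports; zero selects the driver's built-in default.
 */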
unsigned int layoutstats_timer;
module_param(layoutstats_timer, uint, 0644);
EXPORT_SYMBOL_GPL(layoutstats_timer);