/*
 *  pNFS functions to call and manage layout drivers.
 *
 *  Copyright (c) 2002 [year of first publication]
 *  The Regents of the University of Michigan
 *  All Rights Reserved
 *
 *  Dean Hildebrand <dhildebz@umich.edu>
 *
 *  Permission is granted to use, copy, create derivative works, and
 *  redistribute this software and such derivative works for any purpose,
 *  so long as the name of the University of Michigan is not used in
 *  any advertising or publicity pertaining to the use or distribution
 *  of this software without specific, written prior authorization. If
 *  the above copyright notice or any other identification of the
 *  University of Michigan is included in any copy of any portion of
 *  this software, then the disclaimer below must also be included.
 *
 *  This software is provided as is, without representation or warranty
 *  of any kind either express or implied, including without limitation
 *  the implied warranties of merchantability, fitness for a particular
 *  purpose, or noninfringement.  The Regents of the University of
 *  Michigan shall not be liable for any damages, including special,
 *  indirect, incidental, or consequential damages, with respect to any
 *  claim arising out of or in connection with the use of the software,
 *  even if it has been or is hereafter advised of the possibility of
 *  such damages.
 */

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/module.h>
#include <linux/sort.h>
#include "internal.h"
#include "pnfs.h"
#include "iostat.h"
#include "nfs4trace.h"
#include "delegation.h"
#include "nfs42.h"

#define NFSDBG_FACILITY         NFSDBG_PNFS
#define PNFS_LAYOUTGET_RETRY_TIMEOUT (120*HZ)

/* Locking:
 *
 * pnfs_spinlock:
 *      protects pnfs_modules_tbl.
 */
static DEFINE_SPINLOCK(pnfs_spinlock);

/*
 * pnfs_modules_tbl holds all pnfs modules
 */
static LIST_HEAD(pnfs_modules_tbl);

static void pnfs_layoutreturn_before_put_layout_hdr(struct pnfs_layout_hdr *lo);
static void pnfs_free_returned_lsegs(struct pnfs_layout_hdr *lo,
                struct list_head *free_me,
                const struct pnfs_layout_range *range,
                u32 seq);
static bool pnfs_lseg_dec_and_remove_zero(struct pnfs_layout_segment *lseg,
                                struct list_head *tmp_list);

/* Return the registered pnfs layout driver module matching given id */
static struct pnfs_layoutdriver_type *
find_pnfs_driver_locked(u32 id)
{
        struct pnfs_layoutdriver_type *local;

        list_for_each_entry(local, &pnfs_modules_tbl, pnfs_tblid)
                if (local->id == id)
                        goto out;
        local = NULL;
out:
        dprintk("%s: Searching for id %u, found %p\n", __func__, id, local);
        return local;
}

static struct pnfs_layoutdriver_type *
find_pnfs_driver(u32 id)
{
        struct pnfs_layoutdriver_type *local;

        spin_lock(&pnfs_spinlock);
        local = find_pnfs_driver_locked(id);
        if (local != NULL && !try_module_get(local->owner)) {
                dprintk("%s: Could not grab reference on module\n", __func__);
                local = NULL;
        }
        spin_unlock(&pnfs_spinlock);
        return local;
}

void
unset_pnfs_layoutdriver(struct nfs_server *nfss)
{
        if (nfss->pnfs_curr_ld) {
                if (nfss->pnfs_curr_ld->clear_layoutdriver)
                        nfss->pnfs_curr_ld->clear_layoutdriver(nfss);
                /* Decrement the MDS count. Purge the deviceid cache if zero */
                if (atomic_dec_and_test(&nfss->nfs_client->cl_mds_count))
                        nfs4_deviceid_purge_client(nfss->nfs_client);
                module_put(nfss->pnfs_curr_ld->owner);
        }
        nfss->pnfs_curr_ld = NULL;
}

/*
 * When the server sends a list of layout types, we choose one in the order
 * given in the list below.
 *
 * FIXME: should this list be configurable in some fashion? module param?
 *        mount option? something else?
 */
static const u32 ld_prefs[] = {
        LAYOUT_SCSI,
        LAYOUT_BLOCK_VOLUME,
        LAYOUT_OSD2_OBJECTS,
        LAYOUT_FLEX_FILES,
        LAYOUT_NFSV4_1_FILES,
        0
};

static int
ld_cmp(const void *e1, const void *e2)
{
        u32 ld1 = *((u32 *)e1);
        u32 ld2 = *((u32 *)e2);
        int i;

        for (i = 0; ld_prefs[i] != 0; i++) {
                if (ld1 == ld_prefs[i])
                        return -1;

                if (ld2 == ld_prefs[i])
                        return 1;
        }
        return 0;
}
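
/*
 * Illustrative example (not in the original source): given the ld_prefs[]
 * table above, if the MDS advertises layouttype[] =
 * { LAYOUT_NFSV4_1_FILES, LAYOUT_FLEX_FILES, LAYOUT_SCSI }, sorting the
 * array with ld_cmp() yields { LAYOUT_SCSI, LAYOUT_FLEX_FILES,
 * LAYOUT_NFSV4_1_FILES }, so set_pnfs_layoutdriver() below probes the
 * drivers in preference order.
 */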

/*
 * Try to set the server's pnfs module to the pnfs layout type specified by id.
 * Currently only one pNFS layout driver per filesystem is supported.
 *
 * @ids array of layout types supported by MDS.
 */
void
set_pnfs_layoutdriver(struct nfs_server *server, const struct nfs_fh *mntfh,
                      struct nfs_fsinfo *fsinfo)
{
        struct pnfs_layoutdriver_type *ld_type = NULL;
        u32 id;
        int i;

        if (fsinfo->nlayouttypes == 0)
                goto out_no_driver;
        if (!(server->nfs_client->cl_exchange_flags &
                 (EXCHGID4_FLAG_USE_NON_PNFS | EXCHGID4_FLAG_USE_PNFS_MDS))) {
                printk(KERN_ERR "NFS: %s: cl_exchange_flags 0x%x\n",
                        __func__, server->nfs_client->cl_exchange_flags);
                goto out_no_driver;
        }

        sort(fsinfo->layouttype, fsinfo->nlayouttypes,
                sizeof(*fsinfo->layouttype), ld_cmp, NULL);

        for (i = 0; i < fsinfo->nlayouttypes; i++) {
                id = fsinfo->layouttype[i];
                ld_type = find_pnfs_driver(id);
                if (!ld_type) {
                        request_module("%s-%u", LAYOUT_NFSV4_1_MODULE_PREFIX,
                                        id);
                        ld_type = find_pnfs_driver(id);
                }
                if (ld_type)
                        break;
        }

        if (!ld_type) {
                dprintk("%s: No pNFS module found!\n", __func__);
                goto out_no_driver;
        }

        server->pnfs_curr_ld = ld_type;
        if (ld_type->set_layoutdriver
            && ld_type->set_layoutdriver(server, mntfh)) {
                printk(KERN_ERR "NFS: %s: Error initializing pNFS layout "
                        "driver %u.\n", __func__, id);
                module_put(ld_type->owner);
                goto out_no_driver;
        }
        /* Bump the MDS count */
        atomic_inc(&server->nfs_client->cl_mds_count);

        dprintk("%s: pNFS module for %u set\n", __func__, id);
        return;

out_no_driver:
        dprintk("%s: Using NFSv4 I/O\n", __func__);
        server->pnfs_curr_ld = NULL;
}

int
pnfs_register_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
{
        int status = -EINVAL;
        struct pnfs_layoutdriver_type *tmp;

        if (ld_type->id == 0) {
                printk(KERN_ERR "NFS: %s id 0 is reserved\n", __func__);
                return status;
        }
        if (!ld_type->alloc_lseg || !ld_type->free_lseg) {
                printk(KERN_ERR "NFS: %s Layout driver must provide "
                       "alloc_lseg and free_lseg.\n", __func__);
                return status;
        }

        spin_lock(&pnfs_spinlock);
        tmp = find_pnfs_driver_locked(ld_type->id);
        if (!tmp) {
                list_add(&ld_type->pnfs_tblid, &pnfs_modules_tbl);
                status = 0;
                dprintk("%s Registering id:%u name:%s\n", __func__, ld_type->id,
                        ld_type->name);
        } else {
                printk(KERN_ERR "NFS: %s Module with id %u already loaded!\n",
                        __func__, ld_type->id);
        }
        spin_unlock(&pnfs_spinlock);

        return status;
}
EXPORT_SYMBOL_GPL(pnfs_register_layoutdriver);

void
pnfs_unregister_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
{
        dprintk("%s Deregistering id:%u\n", __func__, ld_type->id);
        spin_lock(&pnfs_spinlock);
        list_del(&ld_type->pnfs_tblid);
        spin_unlock(&pnfs_spinlock);
}
EXPORT_SYMBOL_GPL(pnfs_unregister_layoutdriver);

/*
 * pNFS client layout cache
 */

/* Need to hold i_lock if caller does not already hold reference */
void
pnfs_get_layout_hdr(struct pnfs_layout_hdr *lo)
{
        atomic_inc(&lo->plh_refcount);
}

static struct pnfs_layout_hdr *
pnfs_alloc_layout_hdr(struct inode *ino, gfp_t gfp_flags)
{
        struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld;
        return ld->alloc_layout_hdr(ino, gfp_flags);
}

static void
pnfs_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
        struct nfs_server *server = NFS_SERVER(lo->plh_inode);
        struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld;

        if (!list_empty(&lo->plh_layouts)) {
                struct nfs_client *clp = server->nfs_client;

                spin_lock(&clp->cl_lock);
                list_del_init(&lo->plh_layouts);
                spin_unlock(&clp->cl_lock);
        }
        put_rpccred(lo->plh_lc_cred);
        return ld->free_layout_hdr(lo);
}

static void
pnfs_detach_layout_hdr(struct pnfs_layout_hdr *lo)
{
        struct nfs_inode *nfsi = NFS_I(lo->plh_inode);
        dprintk("%s: freeing layout cache %p\n", __func__, lo);
        nfsi->layout = NULL;
        /* Reset MDS Threshold I/O counters */
        nfsi->write_io = 0;
        nfsi->read_io = 0;
}

void
pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo)
{
        struct inode *inode = lo->plh_inode;

        pnfs_layoutreturn_before_put_layout_hdr(lo);

        if (atomic_dec_and_lock(&lo->plh_refcount, &inode->i_lock)) {
                if (!list_empty(&lo->plh_segs))
                        WARN_ONCE(1, "NFS: BUG unfreed layout segments.\n");
                pnfs_detach_layout_hdr(lo);
                spin_unlock(&inode->i_lock);
                pnfs_free_layout_hdr(lo);
        }
}

static void
pnfs_set_plh_return_info(struct pnfs_layout_hdr *lo, enum pnfs_iomode iomode,
                         u32 seq)
{
        if (lo->plh_return_iomode != 0 && lo->plh_return_iomode != iomode)
                iomode = IOMODE_ANY;
        lo->plh_return_iomode = iomode;
        set_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags);
        if (seq != 0) {
                WARN_ON_ONCE(lo->plh_return_seq != 0 && lo->plh_return_seq != seq);
                lo->plh_return_seq = seq;
        }
}

static void
pnfs_clear_layoutreturn_info(struct pnfs_layout_hdr *lo)
{
        struct pnfs_layout_segment *lseg;
        lo->plh_return_iomode = 0;
        lo->plh_return_seq = 0;
        clear_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags);
        list_for_each_entry(lseg, &lo->plh_segs, pls_list) {
                if (!test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags))
                        continue;
                pnfs_set_plh_return_info(lo, lseg->pls_range.iomode, 0);
        }
}

static void pnfs_clear_layoutreturn_waitbit(struct pnfs_layout_hdr *lo)
{
        clear_bit_unlock(NFS_LAYOUT_RETURN, &lo->plh_flags);
        clear_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags);
        smp_mb__after_atomic();
        wake_up_bit(&lo->plh_flags, NFS_LAYOUT_RETURN);
        rpc_wake_up(&NFS_SERVER(lo->plh_inode)->roc_rpcwaitq);
}

static void
pnfs_clear_lseg_state(struct pnfs_layout_segment *lseg,
                struct list_head *free_me)
{
        clear_bit(NFS_LSEG_ROC, &lseg->pls_flags);
        clear_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags);
        if (test_and_clear_bit(NFS_LSEG_VALID, &lseg->pls_flags))
                pnfs_lseg_dec_and_remove_zero(lseg, free_me);
        if (test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
                pnfs_lseg_dec_and_remove_zero(lseg, free_me);
}

/*
 * Mark a pnfs_layout_hdr and all associated layout segments as invalid
 *
 * In order to continue using the pnfs_layout_hdr, a full recovery
 * is required.
 * Note that caller must hold inode->i_lock.
 */
int
pnfs_mark_layout_stateid_invalid(struct pnfs_layout_hdr *lo,
                struct list_head *lseg_list)
{
        struct pnfs_layout_range range = {
                .iomode = IOMODE_ANY,
                .offset = 0,
                .length = NFS4_MAX_UINT64,
        };
        struct pnfs_layout_segment *lseg, *next;

        set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
        list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
                pnfs_clear_lseg_state(lseg, lseg_list);
        pnfs_clear_layoutreturn_info(lo);
        pnfs_free_returned_lsegs(lo, lseg_list, &range, 0);
        if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags) &&
            !test_and_set_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags))
                pnfs_clear_layoutreturn_waitbit(lo);
        return !list_empty(&lo->plh_segs);
}

static int
pnfs_iomode_to_fail_bit(u32 iomode)
{
        return iomode == IOMODE_RW ?
                NFS_LAYOUT_RW_FAILED : NFS_LAYOUT_RO_FAILED;
}

static void
pnfs_layout_set_fail_bit(struct pnfs_layout_hdr *lo, int fail_bit)
{
        lo->plh_retry_timestamp = jiffies;
        if (!test_and_set_bit(fail_bit, &lo->plh_flags))
                atomic_inc(&lo->plh_refcount);
}

static void
pnfs_layout_clear_fail_bit(struct pnfs_layout_hdr *lo, int fail_bit)
{
        if (test_and_clear_bit(fail_bit, &lo->plh_flags))
                atomic_dec(&lo->plh_refcount);
}

static void
pnfs_layout_io_set_failed(struct pnfs_layout_hdr *lo, u32 iomode)
{
        struct inode *inode = lo->plh_inode;
        struct pnfs_layout_range range = {
                .iomode = iomode,
                .offset = 0,
                .length = NFS4_MAX_UINT64,
        };
        LIST_HEAD(head);

        spin_lock(&inode->i_lock);
        pnfs_layout_set_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode));
        pnfs_mark_matching_lsegs_invalid(lo, &head, &range, 0);
        spin_unlock(&inode->i_lock);
        pnfs_free_lseg_list(&head);
        dprintk("%s Setting layout IOMODE_%s fail bit\n", __func__,
                        iomode == IOMODE_RW ?  "RW" : "READ");
}

static bool
pnfs_layout_io_test_failed(struct pnfs_layout_hdr *lo, u32 iomode)
{
        unsigned long start, end;
        int fail_bit = pnfs_iomode_to_fail_bit(iomode);

        if (test_bit(fail_bit, &lo->plh_flags) == 0)
                return false;
        end = jiffies;
        start = end - PNFS_LAYOUTGET_RETRY_TIMEOUT;
        if (!time_in_range(lo->plh_retry_timestamp, start, end)) {
                /* It is time to retry the failed layoutgets */
                pnfs_layout_clear_fail_bit(lo, fail_bit);
                return false;
        }
        return true;
}
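
/*
 * Illustrative note (not in the original source): with
 * PNFS_LAYOUTGET_RETRY_TIMEOUT defined above as 120*HZ, a fail bit set
 * at time T keeps pnfs_layout_io_test_failed() returning true, and I/O
 * falling back to the MDS, until roughly T + 2 minutes, after which the
 * bit is cleared and LAYOUTGET is retried.
 */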

static void
pnfs_init_lseg(struct pnfs_layout_hdr *lo, struct pnfs_layout_segment *lseg,
                const struct pnfs_layout_range *range,
                const nfs4_stateid *stateid)
{
        INIT_LIST_HEAD(&lseg->pls_list);
        INIT_LIST_HEAD(&lseg->pls_lc_list);
        atomic_set(&lseg->pls_refcount, 1);
        set_bit(NFS_LSEG_VALID, &lseg->pls_flags);
        lseg->pls_layout = lo;
        lseg->pls_range = *range;
        lseg->pls_seq = be32_to_cpu(stateid->seqid);
}

static void pnfs_free_lseg(struct pnfs_layout_segment *lseg)
{
        if (lseg != NULL) {
                struct inode *inode = lseg->pls_layout->plh_inode;
                NFS_SERVER(inode)->pnfs_curr_ld->free_lseg(lseg);
        }
}

static void
pnfs_layout_remove_lseg(struct pnfs_layout_hdr *lo,
                struct pnfs_layout_segment *lseg)
{
        WARN_ON(test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
        list_del_init(&lseg->pls_list);
        /* Matched by pnfs_get_layout_hdr in pnfs_layout_insert_lseg */
        atomic_dec(&lo->plh_refcount);
        if (test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags))
                return;
        if (list_empty(&lo->plh_segs) &&
            !test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags) &&
            !test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) {
                if (atomic_read(&lo->plh_outstanding) == 0)
                        set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
                clear_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
        }
}

static bool
pnfs_cache_lseg_for_layoutreturn(struct pnfs_layout_hdr *lo,
                struct pnfs_layout_segment *lseg)
{
        if (test_and_clear_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags) &&
            pnfs_layout_is_valid(lo)) {
                pnfs_set_plh_return_info(lo, lseg->pls_range.iomode, 0);
                list_move_tail(&lseg->pls_list, &lo->plh_return_segs);
                return true;
        }
        return false;
}

void
pnfs_put_lseg(struct pnfs_layout_segment *lseg)
{
        struct pnfs_layout_hdr *lo;
        struct inode *inode;

        if (!lseg)
                return;

        dprintk("%s: lseg %p ref %d valid %d\n", __func__, lseg,
                atomic_read(&lseg->pls_refcount),
                test_bit(NFS_LSEG_VALID, &lseg->pls_flags));

        lo = lseg->pls_layout;
        inode = lo->plh_inode;

        if (atomic_dec_and_lock(&lseg->pls_refcount, &inode->i_lock)) {
                if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
                        spin_unlock(&inode->i_lock);
                        return;
                }
                pnfs_get_layout_hdr(lo);
                pnfs_layout_remove_lseg(lo, lseg);
                if (pnfs_cache_lseg_for_layoutreturn(lo, lseg))
                        lseg = NULL;
                spin_unlock(&inode->i_lock);
                pnfs_free_lseg(lseg);
                pnfs_put_layout_hdr(lo);
        }
}
EXPORT_SYMBOL_GPL(pnfs_put_lseg);

/*
 * is l2 fully contained in l1?
 *   start1                             end1
 *   [----------------------------------)
 *           start2           end2
 *           [----------------)
 */
static bool
pnfs_lseg_range_contained(const struct pnfs_layout_range *l1,
                 const struct pnfs_layout_range *l2)
{
        u64 start1 = l1->offset;
        u64 end1 = pnfs_end_offset(start1, l1->length);
        u64 start2 = l2->offset;
        u64 end2 = pnfs_end_offset(start2, l2->length);

        return (start1 <= start2) && (end1 >= end2);
}

static bool pnfs_lseg_dec_and_remove_zero(struct pnfs_layout_segment *lseg,
                struct list_head *tmp_list)
{
        if (!atomic_dec_and_test(&lseg->pls_refcount))
                return false;
        pnfs_layout_remove_lseg(lseg->pls_layout, lseg);
        list_add(&lseg->pls_list, tmp_list);
        return true;
}

/* Returns 1 if lseg is removed from list, 0 otherwise */
static int mark_lseg_invalid(struct pnfs_layout_segment *lseg,
                             struct list_head *tmp_list)
{
        int rv = 0;

        if (test_and_clear_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
                /* Remove the reference keeping the lseg in the
                 * list.  It will now be removed when all
                 * outstanding io is finished.
                 */
                dprintk("%s: lseg %p ref %d\n", __func__, lseg,
                        atomic_read(&lseg->pls_refcount));
                if (pnfs_lseg_dec_and_remove_zero(lseg, tmp_list))
                        rv = 1;
        }
        return rv;
}

/*
 * Compare 2 layout stateid sequence ids, to see which is newer,
 * taking into account wraparound issues.
 */
static bool pnfs_seqid_is_newer(u32 s1, u32 s2)
{
        return (s32)(s1 - s2) > 0;
}
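
/*
 * Worked example (values are illustrative, not from the original
 * source): the unsigned subtraction cast to s32 handles seqid
 * wraparound. With s1 = 0x00000002 and s2 = 0xfffffffe, s1 - s2 wraps
 * to 4 and (s32)4 > 0, so s1 is correctly treated as newer even though
 * it is numerically smaller; the reverse call computes (s32)-4 > 0,
 * which is false.
 */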

static bool
pnfs_should_free_range(const struct pnfs_layout_range *lseg_range,
                 const struct pnfs_layout_range *recall_range)
{
        return (recall_range->iomode == IOMODE_ANY ||
                lseg_range->iomode == recall_range->iomode) &&
               pnfs_lseg_range_intersecting(lseg_range, recall_range);
}

static bool
pnfs_match_lseg_recall(const struct pnfs_layout_segment *lseg,
                const struct pnfs_layout_range *recall_range,
                u32 seq)
{
        if (seq != 0 && pnfs_seqid_is_newer(lseg->pls_seq, seq))
                return false;
        if (recall_range == NULL)
                return true;
        return pnfs_should_free_range(&lseg->pls_range, recall_range);
}

/**
 * pnfs_mark_matching_lsegs_invalid - tear down lsegs or mark them for later
 * @lo: layout header containing the lsegs
 * @tmp_list: list head where doomed lsegs should go
 * @recall_range: optional recall range argument to match (may be NULL)
 * @seq: only invalidate lsegs obtained prior to this sequence (may be 0)
 *
 * Walk the list of lsegs in the layout header, and tear down any that should
 * be destroyed. If "recall_range" is specified then the segment must match
 * that range. If "seq" is non-zero, then only match segments that were handed
 * out at or before that sequence.
 *
 * Returns number of matching invalid lsegs remaining in list after scanning
 * it and purging them.
 */
int
pnfs_mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
                            struct list_head *tmp_list,
                            const struct pnfs_layout_range *recall_range,
                            u32 seq)
{
        struct pnfs_layout_segment *lseg, *next;
        int remaining = 0;

        dprintk("%s:Begin lo %p\n", __func__, lo);

        if (list_empty(&lo->plh_segs))
                return 0;
        list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
                if (pnfs_match_lseg_recall(lseg, recall_range, seq)) {
                        dprintk("%s: freeing lseg %p iomode %d seq %u "
                                "offset %llu length %llu\n", __func__,
                                lseg, lseg->pls_range.iomode, lseg->pls_seq,
                                lseg->pls_range.offset, lseg->pls_range.length);
                        if (!mark_lseg_invalid(lseg, tmp_list))
                                remaining++;
                }
        dprintk("%s:Return %i\n", __func__, remaining);
        return remaining;
}

static void
pnfs_free_returned_lsegs(struct pnfs_layout_hdr *lo,
                struct list_head *free_me,
                const struct pnfs_layout_range *range,
                u32 seq)
{
        struct pnfs_layout_segment *lseg, *next;

        list_for_each_entry_safe(lseg, next, &lo->plh_return_segs, pls_list) {
                if (pnfs_match_lseg_recall(lseg, range, seq))
                        list_move_tail(&lseg->pls_list, free_me);
        }
}

/* note free_me must contain lsegs from a single layout_hdr */
void
pnfs_free_lseg_list(struct list_head *free_me)
{
        struct pnfs_layout_segment *lseg, *tmp;

        if (list_empty(free_me))
                return;

        list_for_each_entry_safe(lseg, tmp, free_me, pls_list) {
                list_del(&lseg->pls_list);
                pnfs_free_lseg(lseg);
        }
}

void
pnfs_destroy_layout(struct nfs_inode *nfsi)
{
        struct pnfs_layout_hdr *lo;
        LIST_HEAD(tmp_list);

        spin_lock(&nfsi->vfs_inode.i_lock);
        lo = nfsi->layout;
        if (lo) {
                pnfs_get_layout_hdr(lo);
                pnfs_mark_layout_stateid_invalid(lo, &tmp_list);
                pnfs_layout_clear_fail_bit(lo, NFS_LAYOUT_RO_FAILED);
                pnfs_layout_clear_fail_bit(lo, NFS_LAYOUT_RW_FAILED);
                spin_unlock(&nfsi->vfs_inode.i_lock);
                pnfs_free_lseg_list(&tmp_list);
                nfs_commit_inode(&nfsi->vfs_inode, 0);
                pnfs_put_layout_hdr(lo);
        } else
                spin_unlock(&nfsi->vfs_inode.i_lock);
}
EXPORT_SYMBOL_GPL(pnfs_destroy_layout);

static bool
pnfs_layout_add_bulk_destroy_list(struct inode *inode,
                struct list_head *layout_list)
{
        struct pnfs_layout_hdr *lo;
        bool ret = false;

        spin_lock(&inode->i_lock);
        lo = NFS_I(inode)->layout;
        if (lo != NULL && list_empty(&lo->plh_bulk_destroy)) {
                pnfs_get_layout_hdr(lo);
                list_add(&lo->plh_bulk_destroy, layout_list);
                ret = true;
        }
        spin_unlock(&inode->i_lock);
        return ret;
}

/* Caller must hold rcu_read_lock and clp->cl_lock */
static int
pnfs_layout_bulk_destroy_byserver_locked(struct nfs_client *clp,
                struct nfs_server *server,
                struct list_head *layout_list)
{
        struct pnfs_layout_hdr *lo, *next;
        struct inode *inode;

        list_for_each_entry_safe(lo, next, &server->layouts, plh_layouts) {
                if (test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags))
                        continue;
                inode = igrab(lo->plh_inode);
                if (inode == NULL)
                        continue;
                list_del_init(&lo->plh_layouts);
                if (pnfs_layout_add_bulk_destroy_list(inode, layout_list))
                        continue;
                rcu_read_unlock();
                spin_unlock(&clp->cl_lock);
                iput(inode);
                spin_lock(&clp->cl_lock);
                rcu_read_lock();
                return -EAGAIN;
        }
        return 0;
}

static int
pnfs_layout_free_bulk_destroy_list(struct list_head *layout_list,
                bool is_bulk_recall)
{
        struct pnfs_layout_hdr *lo;
        struct inode *inode;
        LIST_HEAD(lseg_list);
        int ret = 0;

        while (!list_empty(layout_list)) {
                lo = list_entry(layout_list->next, struct pnfs_layout_hdr,
                                plh_bulk_destroy);
                dprintk("%s freeing layout for inode %lu\n", __func__,
                        lo->plh_inode->i_ino);
                inode = lo->plh_inode;

                pnfs_layoutcommit_inode(inode, false);

                spin_lock(&inode->i_lock);
                list_del_init(&lo->plh_bulk_destroy);
                if (pnfs_mark_layout_stateid_invalid(lo, &lseg_list)) {
                        if (is_bulk_recall)
                                set_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
                        ret = -EAGAIN;
                }
                spin_unlock(&inode->i_lock);
                pnfs_free_lseg_list(&lseg_list);
                /* Free all lsegs that are attached to commit buckets */
                nfs_commit_inode(inode, 0);
                pnfs_put_layout_hdr(lo);
                iput(inode);
        }
        return ret;
}

int
pnfs_destroy_layouts_byfsid(struct nfs_client *clp,
                struct nfs_fsid *fsid,
                bool is_recall)
{
        struct nfs_server *server;
        LIST_HEAD(layout_list);

        spin_lock(&clp->cl_lock);
        rcu_read_lock();
restart:
        list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
                if (memcmp(&server->fsid, fsid, sizeof(*fsid)) != 0)
                        continue;
                if (pnfs_layout_bulk_destroy_byserver_locked(clp,
                                server,
                                &layout_list) != 0)
                        goto restart;
        }
        rcu_read_unlock();
        spin_unlock(&clp->cl_lock);

        if (list_empty(&layout_list))
                return 0;
        return pnfs_layout_free_bulk_destroy_list(&layout_list, is_recall);
}

int
pnfs_destroy_layouts_byclid(struct nfs_client *clp,
                bool is_recall)
{
        struct nfs_server *server;
        LIST_HEAD(layout_list);

        spin_lock(&clp->cl_lock);
        rcu_read_lock();
restart:
        list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
                if (pnfs_layout_bulk_destroy_byserver_locked(clp,
                                        server,
                                        &layout_list) != 0)
                        goto restart;
        }
        rcu_read_unlock();
        spin_unlock(&clp->cl_lock);

        if (list_empty(&layout_list))
                return 0;
        return pnfs_layout_free_bulk_destroy_list(&layout_list, is_recall);
}

/*
 * Called by the state manager to remove all layouts established under an
 * expired lease.
 */
void
pnfs_destroy_all_layouts(struct nfs_client *clp)
{
        nfs4_deviceid_mark_client_invalid(clp);
        nfs4_deviceid_purge_client(clp);

        pnfs_destroy_layouts_byclid(clp, false);
}

/* Update lo->plh_stateid with new if it is more recent */
void
pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new,
                        bool update_barrier)
{
        u32 oldseq, newseq, new_barrier = 0;

        oldseq = be32_to_cpu(lo->plh_stateid.seqid);
        newseq = be32_to_cpu(new->seqid);

        if (!pnfs_layout_is_valid(lo)) {
                nfs4_stateid_copy(&lo->plh_stateid, new);
                lo->plh_barrier = newseq;
                pnfs_clear_layoutreturn_info(lo);
                clear_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
                return;
        }
        if (pnfs_seqid_is_newer(newseq, oldseq)) {
                nfs4_stateid_copy(&lo->plh_stateid, new);
                /*
                 * Because of wraparound, we want to keep the barrier
                 * "close" to the current seqids.
                 */
                new_barrier = newseq - atomic_read(&lo->plh_outstanding);
        }
        if (update_barrier)
                new_barrier = be32_to_cpu(new->seqid);
        else if (new_barrier == 0)
                return;
        if (pnfs_seqid_is_newer(new_barrier, lo->plh_barrier))
                lo->plh_barrier = new_barrier;
}
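
/*
 * Illustrative numbers (not from the original source): if the stateid
 * advances to newseq = 100 while plh_outstanding = 3 LAYOUTGETs are
 * still in flight, new_barrier becomes 97, so replies carrying seqids
 * 98-100 are not rejected as stale by pnfs_layout_stateid_blocked()
 * below.
 */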

static bool
pnfs_layout_stateid_blocked(const struct pnfs_layout_hdr *lo,
                const nfs4_stateid *stateid)
{
        u32 seqid = be32_to_cpu(stateid->seqid);

        return !pnfs_seqid_is_newer(seqid, lo->plh_barrier);
}

/* Return true if new layoutgets are currently blocked for this layout */
static bool
pnfs_layoutgets_blocked(const struct pnfs_layout_hdr *lo)
{
        return lo->plh_block_lgets ||
                test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
}

/*
 * Get layout from server.
 *    for now, assume that whole file layouts are requested.
 *    arg->offset: 0
 *    arg->length: all ones
 */
static struct pnfs_layout_segment *
send_layoutget(struct pnfs_layout_hdr *lo,
           struct nfs_open_context *ctx,
           nfs4_stateid *stateid,
           const struct pnfs_layout_range *range,
           long *timeout, gfp_t gfp_flags)
{
        struct inode *ino = lo->plh_inode;
        struct nfs_server *server = NFS_SERVER(ino);
        struct nfs4_layoutget *lgp;
        loff_t i_size;

        dprintk("--> %s\n", __func__);

        /*
         * Synchronously retrieve layout information from server and
         * store in lseg. If we race with a concurrent seqid morphing
         * op, then re-send the LAYOUTGET.
         */
        lgp = kzalloc(sizeof(*lgp), gfp_flags);
        if (lgp == NULL)
                return ERR_PTR(-ENOMEM);

        i_size = i_size_read(ino);

        lgp->args.minlength = PAGE_SIZE;
        if (lgp->args.minlength > range->length)
                lgp->args.minlength = range->length;
        if (range->iomode == IOMODE_READ) {
                if (range->offset >= i_size)
                        lgp->args.minlength = 0;
                else if (i_size - range->offset < lgp->args.minlength)
                        lgp->args.minlength = i_size - range->offset;
        }
        lgp->args.maxcount = PNFS_LAYOUT_MAXSIZE;
        pnfs_copy_range(&lgp->args.range, range);
        lgp->args.type = server->pnfs_curr_ld->id;
        lgp->args.inode = ino;
        lgp->args.ctx = get_nfs_open_context(ctx);
        nfs4_stateid_copy(&lgp->args.stateid, stateid);
        lgp->gfp_flags = gfp_flags;
        lgp->cred = lo->plh_lc_cred;

        return nfs4_proc_layoutget(lgp, timeout, gfp_flags);
}

static void pnfs_clear_layoutcommit(struct inode *inode,
                struct list_head *head)
{
        struct nfs_inode *nfsi = NFS_I(inode);
        struct pnfs_layout_segment *lseg, *tmp;

        if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
                return;
        list_for_each_entry_safe(lseg, tmp, &nfsi->layout->plh_segs, pls_list) {
                if (!test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
                        continue;
                pnfs_lseg_dec_and_remove_zero(lseg, head);
        }
}

void pnfs_layoutreturn_free_lsegs(struct pnfs_layout_hdr *lo,
                const nfs4_stateid *arg_stateid,
                const struct pnfs_layout_range *range,
                const nfs4_stateid *stateid)
{
        struct inode *inode = lo->plh_inode;
        LIST_HEAD(freeme);

        spin_lock(&inode->i_lock);
        if (!pnfs_layout_is_valid(lo) || !arg_stateid ||
            !nfs4_stateid_match_other(&lo->plh_stateid, arg_stateid))
                goto out_unlock;
        if (stateid) {
                u32 seq = be32_to_cpu(arg_stateid->seqid);

                pnfs_mark_matching_lsegs_invalid(lo, &freeme, range, seq);
                pnfs_free_returned_lsegs(lo, &freeme, range, seq);
                pnfs_set_layout_stateid(lo, stateid, true);
        } else
                pnfs_mark_layout_stateid_invalid(lo, &freeme);
out_unlock:
        pnfs_clear_layoutreturn_waitbit(lo);
        spin_unlock(&inode->i_lock);
        pnfs_free_lseg_list(&freeme);
}

static bool
pnfs_prepare_layoutreturn(struct pnfs_layout_hdr *lo,
                nfs4_stateid *stateid,
                enum pnfs_iomode *iomode)
{
        /* Serialise LAYOUTGET/LAYOUTRETURN */
        if (atomic_read(&lo->plh_outstanding) != 0)
                return false;
        if (test_and_set_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags))
                return false;
        set_bit(NFS_LAYOUT_RETURN, &lo->plh_flags);
        pnfs_get_layout_hdr(lo);
        if (test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags)) {
                if (stateid != NULL) {
                        nfs4_stateid_copy(stateid, &lo->plh_stateid);
                        if (lo->plh_return_seq != 0)
                                stateid->seqid = cpu_to_be32(lo->plh_return_seq);
                }
                if (iomode != NULL)
                        *iomode = lo->plh_return_iomode;
                pnfs_clear_layoutreturn_info(lo);
                return true;
        }
        if (stateid != NULL)
                nfs4_stateid_copy(stateid, &lo->plh_stateid);
        if (iomode != NULL)
                *iomode = IOMODE_ANY;
        return true;
}

static void
pnfs_init_layoutreturn_args(struct nfs4_layoutreturn_args *args,
                struct pnfs_layout_hdr *lo,
                const nfs4_stateid *stateid,
                enum pnfs_iomode iomode)
{
        struct inode *inode = lo->plh_inode;

        args->layout_type = NFS_SERVER(inode)->pnfs_curr_ld->id;
        args->inode = inode;
        args->range.iomode = iomode;
        args->range.offset = 0;
        args->range.length = NFS4_MAX_UINT64;
        args->layout = lo;
        nfs4_stateid_copy(&args->stateid, stateid);
}

static int
pnfs_send_layoutreturn(struct pnfs_layout_hdr *lo, const nfs4_stateid *stateid,
                       enum pnfs_iomode iomode, bool sync)
{
        struct inode *ino = lo->plh_inode;
        struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld;
        struct nfs4_layoutreturn *lrp;
        int status = 0;

        lrp = kzalloc(sizeof(*lrp), GFP_NOFS);
        if (unlikely(lrp == NULL)) {
                status = -ENOMEM;
                spin_lock(&ino->i_lock);
                pnfs_clear_layoutreturn_waitbit(lo);
                spin_unlock(&ino->i_lock);
                pnfs_put_layout_hdr(lo);
                goto out;
        }

        pnfs_init_layoutreturn_args(&lrp->args, lo, stateid, iomode);
        lrp->args.ld_private = &lrp->ld_private;
        lrp->clp = NFS_SERVER(ino)->nfs_client;
        lrp->cred = lo->plh_lc_cred;
        if (ld->prepare_layoutreturn)
                ld->prepare_layoutreturn(&lrp->args);

        status = nfs4_proc_layoutreturn(lrp, sync);
out:
        dprintk("<-- %s status: %d\n", __func__, status);
        return status;
}

/* Return true if layoutreturn is needed */
static bool
pnfs_layout_need_return(struct pnfs_layout_hdr *lo)
{
        struct pnfs_layout_segment *s;

        if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags))
                return false;

        /* Defer layoutreturn until all lsegs are done */
        list_for_each_entry(s, &lo->plh_segs, pls_list) {
                if (test_bit(NFS_LSEG_LAYOUTRETURN, &s->pls_flags))
                        return false;
        }

        return true;
}

static void pnfs_layoutreturn_before_put_layout_hdr(struct pnfs_layout_hdr *lo)
{
        struct inode *inode = lo->plh_inode;

        if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags))
                return;
        spin_lock(&inode->i_lock);
        if (pnfs_layout_need_return(lo)) {
                nfs4_stateid stateid;
                enum pnfs_iomode iomode;
                bool send;

                send = pnfs_prepare_layoutreturn(lo, &stateid, &iomode);
                spin_unlock(&inode->i_lock);
                if (send) {
                        /* Send an async layoutreturn so we don't deadlock */
                        pnfs_send_layoutreturn(lo, &stateid, iomode, false);
                }
        } else
                spin_unlock(&inode->i_lock);
}

/*
 * Initiates a LAYOUTRETURN(FILE), and removes the pnfs_layout_hdr
 * when the layout segment list is empty.
 *
 * Note that a pnfs_layout_hdr can exist with an empty layout segment
 * list when LAYOUTGET has failed, or when LAYOUTGET succeeded, but the
 * deviceid is marked invalid.
 */
int
_pnfs_return_layout(struct inode *ino)
{
        struct pnfs_layout_hdr *lo = NULL;
        struct nfs_inode *nfsi = NFS_I(ino);
        LIST_HEAD(tmp_list);
        nfs4_stateid stateid;
        int status = 0;
        bool send;

        dprintk("NFS: %s for inode %lu\n", __func__, ino->i_ino);

        spin_lock(&ino->i_lock);
        lo = nfsi->layout;
        if (!lo) {
                spin_unlock(&ino->i_lock);
                dprintk("NFS: %s no layout to return\n", __func__);
                goto out;
        }
        /* Reference matched in nfs4_layoutreturn_release */
        pnfs_get_layout_hdr(lo);
        /* Is there an outstanding layoutreturn ? */
        if (test_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags)) {
                spin_unlock(&ino->i_lock);
                if (wait_on_bit(&lo->plh_flags, NFS_LAYOUT_RETURN,
                                        TASK_UNINTERRUPTIBLE))
                        goto out_put_layout_hdr;
                spin_lock(&ino->i_lock);
        }
        pnfs_clear_layoutcommit(ino, &tmp_list);
        pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL, 0);

        if (NFS_SERVER(ino)->pnfs_curr_ld->return_range) {
                struct pnfs_layout_range range = {
                        .iomode         = IOMODE_ANY,
                        .offset         = 0,
                        .length         = NFS4_MAX_UINT64,
                };
                NFS_SERVER(ino)->pnfs_curr_ld->return_range(lo, &range);
        }

        /* Don't send a LAYOUTRETURN if list was initially empty */
        if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags)) {
                spin_unlock(&ino->i_lock);
                dprintk("NFS: %s no layout segments to return\n", __func__);
                goto out_put_layout_hdr;
        }

        send = pnfs_prepare_layoutreturn(lo, &stateid, NULL);
        spin_unlock(&ino->i_lock);
        if (send)
                status = pnfs_send_layoutreturn(lo, &stateid, IOMODE_ANY, true);
out_put_layout_hdr:
        pnfs_free_lseg_list(&tmp_list);
        pnfs_put_layout_hdr(lo);
out:
        dprintk("<-- %s status: %d\n", __func__, status);
        return status;
}

int
pnfs_commit_and_return_layout(struct inode *inode)
{
        struct pnfs_layout_hdr *lo;
        int ret;

        spin_lock(&inode->i_lock);
        lo = NFS_I(inode)->layout;
        if (lo == NULL) {
                spin_unlock(&inode->i_lock);
                return 0;
        }
        pnfs_get_layout_hdr(lo);
        /* Block new layoutgets and read/write to ds */
        lo->plh_block_lgets++;
        spin_unlock(&inode->i_lock);
        filemap_fdatawait(inode->i_mapping);
        ret = pnfs_layoutcommit_inode(inode, true);
        if (ret == 0)
                ret = _pnfs_return_layout(inode);
        spin_lock(&inode->i_lock);
        lo->plh_block_lgets--;
        spin_unlock(&inode->i_lock);
        pnfs_put_layout_hdr(lo);
        return ret;
}

bool pnfs_roc(struct inode *ino,
                struct nfs4_layoutreturn_args *args,
                struct nfs4_layoutreturn_res *res,
                const struct rpc_cred *cred)
{
        struct nfs_inode *nfsi = NFS_I(ino);
        struct nfs_open_context *ctx;
        struct nfs4_state *state;
        struct pnfs_layout_hdr *lo;
        struct pnfs_layout_segment *lseg, *next;
        nfs4_stateid stateid;
        enum pnfs_iomode iomode = 0;
        bool layoutreturn = false, roc = false;
        bool skip_read = false;

        if (!nfs_have_layout(ino))
                return false;
retry:
        spin_lock(&ino->i_lock);
        lo = nfsi->layout;
        if (!lo || !pnfs_layout_is_valid(lo) ||
            test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags))
                goto out_noroc;
        if (test_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags)) {
                pnfs_get_layout_hdr(lo);
                spin_unlock(&ino->i_lock);
                wait_on_bit(&lo->plh_flags, NFS_LAYOUT_RETURN,
                                TASK_UNINTERRUPTIBLE);
                pnfs_put_layout_hdr(lo);
                goto retry;
        }

        /* no roc if we hold a delegation */
        if (nfs4_check_delegation(ino, FMODE_READ)) {
                if (nfs4_check_delegation(ino, FMODE_WRITE))
                        goto out_noroc;
                skip_read = true;
        }

        list_for_each_entry(ctx, &nfsi->open_files, list) {
                state = ctx->state;
                if (state == NULL)
                        continue;
                /* Don't return layout if there is open file state */
                if (state->state & FMODE_WRITE)
                        goto out_noroc;
                if (state->state & FMODE_READ)
                        skip_read = true;
        }

        list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list) {
                if (skip_read && lseg->pls_range.iomode == IOMODE_READ)
                        continue;
                /* If we are sending layoutreturn, invalidate all valid lsegs */
                if (!test_and_clear_bit(NFS_LSEG_ROC, &lseg->pls_flags))
                        continue;
                /*
                 * Note: mark lseg for return so pnfs_layout_remove_lseg
                 * doesn't invalidate the layout for us.
                 */
                set_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags);
                if (!mark_lseg_invalid(lseg, &lo->plh_return_segs))
                        continue;
                pnfs_set_plh_return_info(lo, lseg->pls_range.iomode, 0);
        }

        if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags))
                goto out_noroc;

        /* ROC happens only when both conditions hold:
         * 1. there are ROC lsegs
         * 2. we aren't sending a layoutreturn
         */
        /* lo ref dropped in pnfs_roc_release() */
        layoutreturn = pnfs_prepare_layoutreturn(lo, &stateid, &iomode);
        /* If the creds don't match, we can't compound the layoutreturn */
        if (!layoutreturn || cred != lo->plh_lc_cred)
                goto out_noroc;

        roc = layoutreturn;
        pnfs_init_layoutreturn_args(args, lo, &stateid, iomode);
        res->lrs_present = 0;
        layoutreturn = false;

out_noroc:
        spin_unlock(&ino->i_lock);
        pnfs_layoutcommit_inode(ino, true);
        if (roc) {
                struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld;
                if (ld->prepare_layoutreturn)
                        ld->prepare_layoutreturn(args);
                return true;
        }
        if (layoutreturn)
                pnfs_send_layoutreturn(lo, &stateid, iomode, true);
        return false;
}

void pnfs_roc_release(struct nfs4_layoutreturn_args *args,
                struct nfs4_layoutreturn_res *res,
                int ret)
{
        struct pnfs_layout_hdr *lo = args->layout;
        const nfs4_stateid *arg_stateid = NULL;
        const nfs4_stateid *res_stateid = NULL;
        struct nfs4_xdr_opaque_data *ld_private = args->ld_private;

        if (ret == 0) {
                arg_stateid = &args->stateid;
                if (res->lrs_present)
                        res_stateid = &res->stateid;
        }
        pnfs_layoutreturn_free_lsegs(lo, arg_stateid, &args->range,
                        res_stateid);
        if (ld_private && ld_private->ops && ld_private->ops->free)
                ld_private->ops->free(ld_private);
        pnfs_put_layout_hdr(lo);
        trace_nfs4_layoutreturn_on_close(args->inode, 0);
}

bool pnfs_wait_on_layoutreturn(struct inode *ino, struct rpc_task *task)
{
        struct nfs_inode *nfsi = NFS_I(ino);
        struct pnfs_layout_hdr *lo;
        bool sleep = false;

        /* We might not hold a reference to lo, so we need to check it
         * under i_lock */
        spin_lock(&ino->i_lock);
        lo = nfsi->layout;
        if (lo && test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) {
                rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL);
                sleep = true;
        }
        spin_unlock(&ino->i_lock);
        return sleep;
}

/*
 * Compare two layout segments for sorting into layout cache.
 * We want to preferentially return RW over RO layouts, so ensure those
 * are seen first.
 */
static s64
pnfs_lseg_range_cmp(const struct pnfs_layout_range *l1,
           const struct pnfs_layout_range *l2)
{
        s64 d;

        /* high offset > low offset */
        d = l1->offset - l2->offset;
        if (d)
                return d;

        /* short length > long length */
        d = l2->length - l1->length;
        if (d)
                return d;

        /* read > read/write */
        return (int)(l1->iomode == IOMODE_READ) - (int)(l2->iomode == IOMODE_READ);
}
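
/*
 * Illustrative example (not in the original source): for two whole-file
 * lsegs (offset 0, length NFS4_MAX_UINT64), the offset and length terms
 * compare equal and the iomode term decides: an IOMODE_RW lseg compares
 * lower than an IOMODE_READ one, so pnfs_generic_layout_insert_lseg()
 * places RW segments ahead of READ segments in the cache, as the
 * comment above intends.
 */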
1368
1369 static bool
1370 pnfs_lseg_range_is_after(const struct pnfs_layout_range *l1,
1371                 const struct pnfs_layout_range *l2)
1372 {
1373         return pnfs_lseg_range_cmp(l1, l2) > 0;
1374 }
1375
1376 static bool
1377 pnfs_lseg_no_merge(struct pnfs_layout_segment *lseg,
1378                 struct pnfs_layout_segment *old)
1379 {
1380         return false;
1381 }
1382
1383 void
1384 pnfs_generic_layout_insert_lseg(struct pnfs_layout_hdr *lo,
1385                    struct pnfs_layout_segment *lseg,
1386                    bool (*is_after)(const struct pnfs_layout_range *,
1387                            const struct pnfs_layout_range *),
1388                    bool (*do_merge)(struct pnfs_layout_segment *,
1389                            struct pnfs_layout_segment *),
1390                    struct list_head *free_me)
1391 {
1392         struct pnfs_layout_segment *lp, *tmp;
1393
1394         dprintk("%s:Begin\n", __func__);
1395
1396         list_for_each_entry_safe(lp, tmp, &lo->plh_segs, pls_list) {
1397                 if (test_bit(NFS_LSEG_VALID, &lp->pls_flags) == 0)
1398                         continue;
1399                 if (do_merge(lseg, lp)) {
1400                         mark_lseg_invalid(lp, free_me);
1401                         continue;
1402                 }
1403                 if (is_after(&lseg->pls_range, &lp->pls_range))
1404                         continue;
1405                 list_add_tail(&lseg->pls_list, &lp->pls_list);
1406                 dprintk("%s: inserted lseg %p "
1407                         "iomode %d offset %llu length %llu before "
1408                         "lp %p iomode %d offset %llu length %llu\n",
1409                         __func__, lseg, lseg->pls_range.iomode,
1410                         lseg->pls_range.offset, lseg->pls_range.length,
1411                         lp, lp->pls_range.iomode, lp->pls_range.offset,
1412                         lp->pls_range.length);
1413                 goto out;
1414         }
1415         list_add_tail(&lseg->pls_list, &lo->plh_segs);
1416         dprintk("%s: inserted lseg %p "
1417                 "iomode %d offset %llu length %llu at tail\n",
1418                 __func__, lseg, lseg->pls_range.iomode,
1419                 lseg->pls_range.offset, lseg->pls_range.length);
1420 out:
1421         pnfs_get_layout_hdr(lo);
1422
1423         dprintk("%s:Return\n", __func__);
1424 }
1425 EXPORT_SYMBOL_GPL(pnfs_generic_layout_insert_lseg);
1426
1427 static void
1428 pnfs_layout_insert_lseg(struct pnfs_layout_hdr *lo,
1429                    struct pnfs_layout_segment *lseg,
1430                    struct list_head *free_me)
1431 {
1432         struct inode *inode = lo->plh_inode;
1433         struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;
1434
1435         if (ld->add_lseg != NULL)
1436                 ld->add_lseg(lo, lseg, free_me);
1437         else
1438                 pnfs_generic_layout_insert_lseg(lo, lseg,
1439                                 pnfs_lseg_range_is_after,
1440                                 pnfs_lseg_no_merge,
1441                                 free_me);
1442 }
1443
1444 static struct pnfs_layout_hdr *
1445 alloc_init_layout_hdr(struct inode *ino,
1446                       struct nfs_open_context *ctx,
1447                       gfp_t gfp_flags)
1448 {
1449         struct pnfs_layout_hdr *lo;
1450
1451         lo = pnfs_alloc_layout_hdr(ino, gfp_flags);
1452         if (!lo)
1453                 return NULL;
1454         atomic_set(&lo->plh_refcount, 1);
1455         INIT_LIST_HEAD(&lo->plh_layouts);
1456         INIT_LIST_HEAD(&lo->plh_segs);
1457         INIT_LIST_HEAD(&lo->plh_return_segs);
1458         INIT_LIST_HEAD(&lo->plh_bulk_destroy);
1459         lo->plh_inode = ino;
1460         lo->plh_lc_cred = get_rpccred(ctx->cred);
1461         lo->plh_flags |= 1 << NFS_LAYOUT_INVALID_STID;
1462         return lo;
1463 }
1464
1465 static struct pnfs_layout_hdr *
1466 pnfs_find_alloc_layout(struct inode *ino,
1467                        struct nfs_open_context *ctx,
1468                        gfp_t gfp_flags)
1469         __releases(&ino->i_lock)
1470         __acquires(&ino->i_lock)
1471 {
1472         struct nfs_inode *nfsi = NFS_I(ino);
1473         struct pnfs_layout_hdr *new = NULL;
1474
1475         dprintk("%s Begin ino=%p layout=%p\n", __func__, ino, nfsi->layout);
1476
1477         if (nfsi->layout != NULL)
1478                 goto out_existing;
1479         spin_unlock(&ino->i_lock);
1480         new = alloc_init_layout_hdr(ino, ctx, gfp_flags);
1481         spin_lock(&ino->i_lock);
1482
1483         if (likely(nfsi->layout == NULL)) {     /* Won the race? */
1484                 nfsi->layout = new;
1485                 return new;
1486         } else if (new != NULL)
1487                 pnfs_free_layout_hdr(new);
1488 out_existing:
1489         pnfs_get_layout_hdr(nfsi->layout);
1490         return nfsi->layout;
1491 }
1492
1493 /*
1494  * iomode matching rules:
1495  * request      lseg    strict  match
1496  * iomode       iomode  iomode
1497  * -------      ------  ------  -----
1498  * ANY          READ    N/A    true
1499  * ANY          RW      N/A    true
1500  * RW           READ    N/A    false
1501  * RW           RW      N/A    true
1502  * READ         READ    N/A    true
1503  * READ         RW      true   false
1504  * READ         RW      false  true
1505  */
1506 static bool
1507 pnfs_lseg_range_match(const struct pnfs_layout_range *ls_range,
1508                  const struct pnfs_layout_range *range,
1509                  bool strict_iomode)
1510 {
1511         struct pnfs_layout_range range1;
1512
1513         if ((range->iomode == IOMODE_RW &&
1514              ls_range->iomode != IOMODE_RW) ||
1515             (range->iomode != ls_range->iomode &&
1516              strict_iomode) ||
1517             !pnfs_lseg_range_intersecting(ls_range, range))
1518                 return false;
1519
1520         /* range1 covers only the first byte in the range */
1521         range1 = *range;
1522         range1.length = 1;
1523         return pnfs_lseg_range_contained(ls_range, &range1);
1524 }
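
/*
 * Worked example of the rules above: a cached READ lseg satisfies a READ
 * request, and so does a cached RW lseg unless strict_iomode is set; a
 * READ lseg can never satisfy an RW request. Beyond the iomode, the lseg
 * must intersect the request and contain its first byte (range1 above).
 */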
1525
1526 /*
1527  * lookup range in layout
1528  */
1529 static struct pnfs_layout_segment *
1530 pnfs_find_lseg(struct pnfs_layout_hdr *lo,
1531                 struct pnfs_layout_range *range,
1532                 bool strict_iomode)
1533 {
1534         struct pnfs_layout_segment *lseg, *ret = NULL;
1535
1536         dprintk("%s:Begin\n", __func__);
1537
1538         list_for_each_entry(lseg, &lo->plh_segs, pls_list) {
1539                 if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags) &&
1540                     !test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags) &&
1541                     pnfs_lseg_range_match(&lseg->pls_range, range,
1542                                           strict_iomode)) {
1543                         ret = pnfs_get_lseg(lseg);
1544                         break;
1545                 }
1546         }
1547
1548         dprintk("%s:Return lseg %p ref %d\n",
1549                 __func__, ret, ret ? atomic_read(&ret->pls_refcount) : 0);
1550         return ret;
1551 }
1552
1553 /*
1554  * Use mdsthreshold hints set at each OPEN to determine if I/O should go
1555  * to the MDS or over pNFS
1556  *
1557  * The nfs_inode read_io and write_io fields are cumulative counters reset
1558  * when there are no layout segments. Note that in pnfs_update_layout iomode
1559  * is set to IOMODE_READ for a READ request, and set to IOMODE_RW for a
1560  * WRITE request.
1561  *
1562  * A return of true means use MDS I/O.
1563  *
1564  * From rfc 5661:
1565  * If a file's size is smaller than the file size threshold, data accesses
1566  * SHOULD be sent to the metadata server.  If an I/O request has a length that
1567  * is below the I/O size threshold, the I/O SHOULD be sent to the metadata
1568  * server.  If both file size and I/O size are provided, the client SHOULD
1569  * reach or exceed both thresholds before sending its read or write
1570  * requests to the data server.
1571  */
1572 static bool pnfs_within_mdsthreshold(struct nfs_open_context *ctx,
1573                                      struct inode *ino, int iomode)
1574 {
1575         struct nfs4_threshold *t = ctx->mdsthreshold;
1576         struct nfs_inode *nfsi = NFS_I(ino);
1577         loff_t fsize = i_size_read(ino);
1578         bool size = false, size_set = false, io = false, io_set = false, ret = false;
1579
1580         if (t == NULL)
1581                 return ret;
1582
1583         dprintk("%s bm=0x%x rd_sz=%llu wr_sz=%llu rd_io=%llu wr_io=%llu\n",
1584                 __func__, t->bm, t->rd_sz, t->wr_sz, t->rd_io_sz, t->wr_io_sz);
1585
1586         switch (iomode) {
1587         case IOMODE_READ:
1588                 if (t->bm & THRESHOLD_RD) {
1589                         dprintk("%s fsize %llu\n", __func__, fsize);
1590                         size_set = true;
1591                         if (fsize < t->rd_sz)
1592                                 size = true;
1593                 }
1594                 if (t->bm & THRESHOLD_RD_IO) {
1595                         dprintk("%s nfsi->read_io %llu\n", __func__,
1596                                 nfsi->read_io);
1597                         io_set = true;
1598                         if (nfsi->read_io < t->rd_io_sz)
1599                                 io = true;
1600                 }
1601                 break;
1602         case IOMODE_RW:
1603                 if (t->bm & THRESHOLD_WR) {
1604                         dprintk("%s fsize %llu\n", __func__, fsize);
1605                         size_set = true;
1606                         if (fsize < t->wr_sz)
1607                                 size = true;
1608                 }
1609                 if (t->bm & THRESHOLD_WR_IO) {
1610                         dprintk("%s nfsi->write_io %llu\n", __func__,
1611                                 nfsi->write_io);
1612                         io_set = true;
1613                         if (nfsi->write_io < t->wr_io_sz)
1614                                 io = true;
1615                 }
1616                 break;
1617         }
1618         if (size_set && io_set) {
1619                 if (size && io)
1620                         ret = true;
1621         } else if (size || io)
1622                 ret = true;
1623
1624         dprintk("<-- %s size %d io %d ret %d\n", __func__, size, io, ret);
1625         return ret;
1626 }
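
/*
 * Example (hypothetical numbers): with THRESHOLD_RD and THRESHOLD_RD_IO
 * both set, rd_sz = 1 MB and rd_io_sz = 64 KB, a READ of a 4 KB file with
 * a cumulative read_io of 8 KB is below both thresholds and so goes to
 * the MDS; once either threshold is reached, the I/O goes over pNFS.
 */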
1627
1628 static bool pnfs_prepare_to_retry_layoutget(struct pnfs_layout_hdr *lo)
1629 {
1630         /*
1631          * send layoutcommit as it can hold up layoutreturn due to lseg
1632          * reference
1633          */
1634         pnfs_layoutcommit_inode(lo->plh_inode, false);
1635         return !wait_on_bit_action(&lo->plh_flags, NFS_LAYOUT_RETURN,
1636                                    nfs_wait_bit_killable,
1637                                    TASK_UNINTERRUPTIBLE);
1638 }
1639
1640 static void pnfs_clear_first_layoutget(struct pnfs_layout_hdr *lo)
1641 {
1642         unsigned long *bitlock = &lo->plh_flags;
1643
1644         clear_bit_unlock(NFS_LAYOUT_FIRST_LAYOUTGET, bitlock);
1645         smp_mb__after_atomic();
1646         wake_up_bit(bitlock, NFS_LAYOUT_FIRST_LAYOUTGET);
1647 }
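
/*
 * Pairs with the test_and_set_bit(NFS_LAYOUT_FIRST_LAYOUTGET) and
 * wait_on_bit() in pnfs_update_layout(): the first LAYOUTGET for a file
 * is serialized per RFC 5661 Errata 3208, and this wakes any waiters
 * once that first request has completed.
 */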
1648
1649 /*
1650  * Layout segment is retrieved from the server if not cached.
1651  * The appropriate layout segment is referenced and returned to the caller.
1652  */
1653 struct pnfs_layout_segment *
1654 pnfs_update_layout(struct inode *ino,
1655                    struct nfs_open_context *ctx,
1656                    loff_t pos,
1657                    u64 count,
1658                    enum pnfs_iomode iomode,
1659                    bool strict_iomode,
1660                    gfp_t gfp_flags)
1661 {
1662         struct pnfs_layout_range arg = {
1663                 .iomode = iomode,
1664                 .offset = pos,
1665                 .length = count,
1666         };
1667         unsigned int pg_offset, seq;
1668         struct nfs_server *server = NFS_SERVER(ino);
1669         struct nfs_client *clp = server->nfs_client;
1670         struct pnfs_layout_hdr *lo = NULL;
1671         struct pnfs_layout_segment *lseg = NULL;
1672         nfs4_stateid stateid;
1673         long timeout = 0;
1674         unsigned long giveup = jiffies + (clp->cl_lease_time << 1);
1675         bool first;
1676
1677         if (!pnfs_enabled_sb(NFS_SERVER(ino))) {
1678                 trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
1679                                  PNFS_UPDATE_LAYOUT_NO_PNFS);
1680                 goto out;
1681         }
1682
1683         if (iomode == IOMODE_READ && i_size_read(ino) == 0) {
1684                 trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
1685                                  PNFS_UPDATE_LAYOUT_RD_ZEROLEN);
1686                 goto out;
1687         }
1688
1689         if (pnfs_within_mdsthreshold(ctx, ino, iomode)) {
1690                 trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
1691                                  PNFS_UPDATE_LAYOUT_MDSTHRESH);
1692                 goto out;
1693         }
1694
1695 lookup_again:
1696         nfs4_client_recover_expired_lease(clp);
1697         first = false;
1698         spin_lock(&ino->i_lock);
1699         lo = pnfs_find_alloc_layout(ino, ctx, gfp_flags);
1700         if (lo == NULL) {
1701                 spin_unlock(&ino->i_lock);
1702                 trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
1703                                  PNFS_UPDATE_LAYOUT_NOMEM);
1704                 goto out;
1705         }
1706
1707         /* Do we even need to bother with this? */
1708         if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
1709                 trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
1710                                  PNFS_UPDATE_LAYOUT_BULK_RECALL);
1711                 dprintk("%s matches recall, use MDS\n", __func__);
1712                 goto out_unlock;
1713         }
1714
1715         /* if LAYOUTGET already failed once we don't try again */
1716         if (pnfs_layout_io_test_failed(lo, iomode)) {
1717                 trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
1718                                  PNFS_UPDATE_LAYOUT_IO_TEST_FAIL);
1719                 goto out_unlock;
1720         }
1721
1722         lseg = pnfs_find_lseg(lo, &arg, strict_iomode);
1723         if (lseg) {
1724                 trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
1725                                 PNFS_UPDATE_LAYOUT_FOUND_CACHED);
1726                 goto out_unlock;
1727         }
1728
1729         if (!nfs4_valid_open_stateid(ctx->state)) {
1730                 trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
1731                                 PNFS_UPDATE_LAYOUT_INVALID_OPEN);
1732                 goto out_unlock;
1733         }
1734
1735         /*
1736          * Choose a stateid for the LAYOUTGET. If we don't have a layout
1737          * stateid, or it has been invalidated, then we must use the open
1738          * stateid.
1739          */
1740         if (test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags)) {
1742                 /*
1743                  * The first layoutget for the file. Need to serialize per
1744                  * RFC 5661 Errata 3208.
1745                  */
1746                 if (test_and_set_bit(NFS_LAYOUT_FIRST_LAYOUTGET,
1747                                      &lo->plh_flags)) {
1748                         spin_unlock(&ino->i_lock);
1749                         wait_on_bit(&lo->plh_flags, NFS_LAYOUT_FIRST_LAYOUTGET,
1750                                     TASK_UNINTERRUPTIBLE);
1751                         pnfs_put_layout_hdr(lo);
1752                         dprintk("%s retrying\n", __func__);
1753                         goto lookup_again;
1754                 }
1755
1756                 first = true;
1757                 do {
1758                         seq = read_seqbegin(&ctx->state->seqlock);
1759                         nfs4_stateid_copy(&stateid, &ctx->state->stateid);
1760                 } while (read_seqretry(&ctx->state->seqlock, seq));
1761         } else {
1762                 nfs4_stateid_copy(&stateid, &lo->plh_stateid);
1763         }
1764
1765         /*
1766          * Because we free lsegs before sending LAYOUTRETURN, we need to wait
1767          * for LAYOUTRETURN even if first is true.
1768          */
1769         if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) {
1770                 spin_unlock(&ino->i_lock);
1771                 dprintk("%s wait for layoutreturn\n", __func__);
1772                 if (pnfs_prepare_to_retry_layoutget(lo)) {
1773                         if (first)
1774                                 pnfs_clear_first_layoutget(lo);
1775                         pnfs_put_layout_hdr(lo);
1776                         dprintk("%s retrying\n", __func__);
1777                         trace_pnfs_update_layout(ino, pos, count, iomode, lo,
1778                                         lseg, PNFS_UPDATE_LAYOUT_RETRY);
1779                         goto lookup_again;
1780                 }
1781                 trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
1782                                 PNFS_UPDATE_LAYOUT_RETURN);
1783                 goto out_put_layout_hdr;
1784         }
1785
1786         if (pnfs_layoutgets_blocked(lo)) {
1787                 trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
1788                                 PNFS_UPDATE_LAYOUT_BLOCKED);
1789                 goto out_unlock;
1790         }
1791         atomic_inc(&lo->plh_outstanding);
1792         spin_unlock(&ino->i_lock);
1793
1794         if (list_empty(&lo->plh_layouts)) {
1795         /* The lo must be on the server's layout list if there is any
1796                  * chance of a CB_LAYOUTRECALL(FILE) coming in.
1797                  */
1798                 spin_lock(&clp->cl_lock);
1799                 if (list_empty(&lo->plh_layouts))
1800                         list_add_tail(&lo->plh_layouts, &server->layouts);
1801                 spin_unlock(&clp->cl_lock);
1802         }
1803
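	/*
	 * Round the requested range out to page boundaries before sending
	 * LAYOUTGET, e.g. (assuming a 4 KB page size) offset 5000, length
	 * 100 becomes offset 4096, length 4096.
	 */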
1804         pg_offset = arg.offset & ~PAGE_MASK;
1805         if (pg_offset) {
1806                 arg.offset -= pg_offset;
1807                 arg.length += pg_offset;
1808         }
1809         if (arg.length != NFS4_MAX_UINT64)
1810                 arg.length = PAGE_ALIGN(arg.length);
1811
1812         lseg = send_layoutget(lo, ctx, &stateid, &arg, &timeout, gfp_flags);
1813         trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
1814                                  PNFS_UPDATE_LAYOUT_SEND_LAYOUTGET);
1815         atomic_dec(&lo->plh_outstanding);
1816         if (IS_ERR(lseg)) {
1817                 switch (PTR_ERR(lseg)) {
1818                 case -EBUSY:
1819                         if (time_after(jiffies, giveup))
1820                                 lseg = NULL;
1821                         break;
1822                 case -ERECALLCONFLICT:
1823                         /* Huh? We hold no layouts, how is there a recall? */
1824                         if (first) {
1825                                 lseg = NULL;
1826                                 break;
1827                         }
1828                         /* Destroy the existing layout and start over */
1829                         if (time_after(jiffies, giveup))
1830                                 pnfs_destroy_layout(NFS_I(ino));
1831                         /* Fallthrough */
1832                 case -EAGAIN:
1833                         break;
1834                 default:
1835                         if (!nfs_error_is_fatal(PTR_ERR(lseg))) {
1836                                 pnfs_layout_clear_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode));
1837                                 lseg = NULL;
1838                         }
1839                         goto out_put_layout_hdr;
1840                 }
1841                 if (lseg) {
1842                         if (first)
1843                                 pnfs_clear_first_layoutget(lo);
1844                         trace_pnfs_update_layout(ino, pos, count,
1845                                 iomode, lo, lseg, PNFS_UPDATE_LAYOUT_RETRY);
1846                         pnfs_put_layout_hdr(lo);
1847                         goto lookup_again;
1848                 }
1849         } else {
1850                 pnfs_layout_clear_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode));
1851         }
1852
1853 out_put_layout_hdr:
1854         if (first)
1855                 pnfs_clear_first_layoutget(lo);
1856         pnfs_put_layout_hdr(lo);
1857 out:
1858         dprintk("%s: inode %s/%llu pNFS layout segment %s for "
1859                         "(%s, offset: %llu, length: %llu)\n",
1860                         __func__, ino->i_sb->s_id,
1861                         (unsigned long long)NFS_FILEID(ino),
1862                         IS_ERR_OR_NULL(lseg) ? "not found" : "found",
1863                         iomode == IOMODE_RW ? "read/write" : "read-only",
1864                         (unsigned long long)pos,
1865                         (unsigned long long)count);
1866         return lseg;
1867 out_unlock:
1868         spin_unlock(&ino->i_lock);
1869         goto out_put_layout_hdr;
1870 }
1871 EXPORT_SYMBOL_GPL(pnfs_update_layout);
1872
1873 static bool
1874 pnfs_sanity_check_layout_range(struct pnfs_layout_range *range)
1875 {
1876         switch (range->iomode) {
1877         case IOMODE_READ:
1878         case IOMODE_RW:
1879                 break;
1880         default:
1881                 return false;
1882         }
1883         if (range->offset == NFS4_MAX_UINT64)
1884                 return false;
1885         if (range->length == 0)
1886                 return false;
1887         if (range->length != NFS4_MAX_UINT64 &&
1888             range->length > NFS4_MAX_UINT64 - range->offset)
1889                 return false;
1890         return true;
1891 }
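
/*
 * e.g. a range with offset == NFS4_MAX_UINT64 - 10 and a (non-infinite)
 * length of 20 would extend past the maximum representable offset, so the
 * overflow check above rejects it.
 */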
1892
1893 struct pnfs_layout_segment *
1894 pnfs_layout_process(struct nfs4_layoutget *lgp)
1895 {
1896         struct pnfs_layout_hdr *lo = NFS_I(lgp->args.inode)->layout;
1897         struct nfs4_layoutget_res *res = &lgp->res;
1898         struct pnfs_layout_segment *lseg;
1899         struct inode *ino = lo->plh_inode;
1900         LIST_HEAD(free_me);
1901
1902         if (!pnfs_sanity_check_layout_range(&res->range))
1903                 return ERR_PTR(-EINVAL);
1904
1905         /* Inject layout blob into I/O device driver */
1906         lseg = NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res, lgp->gfp_flags);
1907         if (IS_ERR_OR_NULL(lseg)) {
1908                 if (!lseg)
1909                         lseg = ERR_PTR(-ENOMEM);
1910
1911                 dprintk("%s: Could not allocate layout: error %ld\n",
1912                        __func__, PTR_ERR(lseg));
1913                 return lseg;
1914         }
1915
1916         pnfs_init_lseg(lo, lseg, &res->range, &res->stateid);
1917
1918         spin_lock(&ino->i_lock);
1919         if (pnfs_layoutgets_blocked(lo)) {
1920                 dprintk("%s forget reply due to state\n", __func__);
1921                 goto out_forget;
1922         }
1923
1924         if (!pnfs_layout_is_valid(lo)) {
1925                 /* We have a completely new layout */
1926                 pnfs_set_layout_stateid(lo, &res->stateid, true);
1927         } else if (nfs4_stateid_match_other(&lo->plh_stateid, &res->stateid)) {
1928                 /* existing state ID, make sure the sequence number matches. */
1929                 if (pnfs_layout_stateid_blocked(lo, &res->stateid)) {
1930                         dprintk("%s forget reply due to sequence\n", __func__);
1931                         goto out_forget;
1932                 }
1933                 pnfs_set_layout_stateid(lo, &res->stateid, false);
1934         } else {
1935                 /*
1936                  * We got an entirely new state ID.  Mark all segments for the
1937                  * inode invalid, and retry the layoutget
1938                  */
1939                 pnfs_mark_layout_stateid_invalid(lo, &free_me);
1940                 goto out_forget;
1941         }
1942
1943         pnfs_get_lseg(lseg);
1944         pnfs_layout_insert_lseg(lo, lseg, &free_me);
1945
1947         if (res->return_on_close)
1948                 set_bit(NFS_LSEG_ROC, &lseg->pls_flags);
1949
1950         spin_unlock(&ino->i_lock);
1951         pnfs_free_lseg_list(&free_me);
1952         return lseg;
1953
1954 out_forget:
1955         spin_unlock(&ino->i_lock);
1956         lseg->pls_layout = lo;
1957         NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
1958         if (!pnfs_layout_is_valid(lo))
1959                 nfs_commit_inode(ino, 0);
1960         return ERR_PTR(-EAGAIN);
1961 }
1962
1963 /**
1964  * pnfs_mark_matching_lsegs_return - Free or return matching layout segments
1965  * @lo: pointer to layout header
1966  * @tmp_list: list header to be used with pnfs_free_lseg_list()
1967  * @return_range: describe layout segment ranges to be returned
1968  *
1969  * This function is mainly intended for use by layoutrecall. It attempts
1970  * to free the layout segment immediately, or else to mark it for return
1971  * as soon as its reference count drops to zero.
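 *
 * Returns: the number of matching layout segments that were still in use,
 * and so were marked for return rather than freed immediately.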
1972  */
1973 int
1974 pnfs_mark_matching_lsegs_return(struct pnfs_layout_hdr *lo,
1975                                 struct list_head *tmp_list,
1976                                 const struct pnfs_layout_range *return_range,
1977                                 u32 seq)
1978 {
1979         struct pnfs_layout_segment *lseg, *next;
1980         int remaining = 0;
1981
1982         dprintk("%s:Begin lo %p\n", __func__, lo);
1983
1984         if (list_empty(&lo->plh_segs))
1985                 return 0;
1986
1987         assert_spin_locked(&lo->plh_inode->i_lock);
1988
1989         list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
1990                 if (pnfs_match_lseg_recall(lseg, return_range, seq)) {
1991                         dprintk("%s: marking lseg %p iomode %d "
1992                                 "offset %llu length %llu\n", __func__,
1993                                 lseg, lseg->pls_range.iomode,
1994                                 lseg->pls_range.offset,
1995                                 lseg->pls_range.length);
1996                         if (mark_lseg_invalid(lseg, tmp_list))
1997                                 continue;
1998                         remaining++;
1999                         set_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags);
2000                 }
2001
2002         if (remaining)
2003                 pnfs_set_plh_return_info(lo, return_range->iomode, seq);
2004
2005         return remaining;
2006 }
2007
2008 void pnfs_error_mark_layout_for_return(struct inode *inode,
2009                                        struct pnfs_layout_segment *lseg)
2010 {
2011         struct pnfs_layout_hdr *lo = NFS_I(inode)->layout;
2012         struct pnfs_layout_range range = {
2013                 .iomode = lseg->pls_range.iomode,
2014                 .offset = 0,
2015                 .length = NFS4_MAX_UINT64,
2016         };
2017         bool return_now = false;
2018
2019         spin_lock(&inode->i_lock);
2020         if (!pnfs_layout_is_valid(lo)) {
2021                 spin_unlock(&inode->i_lock);
2022                 return;
2023         }
2024         pnfs_set_plh_return_info(lo, range.iomode, 0);
2025         /*
2026          * mark all matching lsegs so that we are sure to have no live
2027          * segments at hand when sending layoutreturn. See pnfs_put_lseg()
2028          * for how it works.
2029          */
2030         if (!pnfs_mark_matching_lsegs_return(lo, &lo->plh_return_segs, &range, 0)) {
2031                 nfs4_stateid stateid;
2032                 enum pnfs_iomode iomode;
2033
2034                 return_now = pnfs_prepare_layoutreturn(lo, &stateid, &iomode);
2035                 spin_unlock(&inode->i_lock);
2036                 if (return_now)
2037                         pnfs_send_layoutreturn(lo, &stateid, iomode, false);
2038         } else {
2039                 spin_unlock(&inode->i_lock);
2040                 nfs_commit_inode(inode, 0);
2041         }
2042 }
2043 EXPORT_SYMBOL_GPL(pnfs_error_mark_layout_for_return);
2044
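/*
 * Drop a cached lseg whose NFS_LSEG_VALID bit has been cleared (e.g. by a
 * layout recall), so that the next request triggers a fresh layout lookup
 * via pnfs_update_layout().
 */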
2045 void
2046 pnfs_generic_pg_check_layout(struct nfs_pageio_descriptor *pgio)
2047 {
2048         if (pgio->pg_lseg == NULL ||
2049             test_bit(NFS_LSEG_VALID, &pgio->pg_lseg->pls_flags))
2050                 return;
2051         pnfs_put_lseg(pgio->pg_lseg);
2052         pgio->pg_lseg = NULL;
2053 }
2054 EXPORT_SYMBOL_GPL(pnfs_generic_pg_check_layout);
2055
2056 /*
2057  * Check for any intersection between the request and the pgio->pg_lseg,
2058  * and if none, put this pgio->pg_lseg away.
2059  */
2060 static void
2061 pnfs_generic_pg_check_range(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
2062 {
2063         if (pgio->pg_lseg && !pnfs_lseg_request_intersecting(pgio->pg_lseg, req)) {
2064                 pnfs_put_lseg(pgio->pg_lseg);
2065                 pgio->pg_lseg = NULL;
2066         }
2067 }
2068
2069 void
2070 pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
2071 {
2072         u64 rd_size = req->wb_bytes;
2073
2074         pnfs_generic_pg_check_layout(pgio);
2075         pnfs_generic_pg_check_range(pgio, req);
2076         if (pgio->pg_lseg == NULL) {
2077                 if (pgio->pg_dreq == NULL)
2078                         rd_size = i_size_read(pgio->pg_inode) - req_offset(req);
2079                 else
2080                         rd_size = nfs_dreq_bytes_left(pgio->pg_dreq);
2081
2082                 pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
2083                                                    req->wb_context,
2084                                                    req_offset(req),
2085                                                    rd_size,
2086                                                    IOMODE_READ,
2087                                                    false,
2088                                                    GFP_KERNEL);
2089                 if (IS_ERR(pgio->pg_lseg)) {
2090                         pgio->pg_error = PTR_ERR(pgio->pg_lseg);
2091                         pgio->pg_lseg = NULL;
2092                         return;
2093                 }
2094         }
2095         /* If no lseg, fall back to read through mds */
2096         if (pgio->pg_lseg == NULL)
2097                 nfs_pageio_reset_read_mds(pgio);
2099 }
2100 EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_read);
2101
2102 void
2103 pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *pgio,
2104                            struct nfs_page *req, u64 wb_size)
2105 {
2106         pnfs_generic_pg_check_layout(pgio);
2107         pnfs_generic_pg_check_range(pgio, req);
2108         if (pgio->pg_lseg == NULL) {
2109                 pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
2110                                                    req->wb_context,
2111                                                    req_offset(req),
2112                                                    wb_size,
2113                                                    IOMODE_RW,
2114                                                    false,
2115                                                    GFP_NOFS);
2116                 if (IS_ERR(pgio->pg_lseg)) {
2117                         pgio->pg_error = PTR_ERR(pgio->pg_lseg);
2118                         pgio->pg_lseg = NULL;
2119                         return;
2120                 }
2121         }
2122         /* If no lseg, fall back to write through mds */
2123         if (pgio->pg_lseg == NULL)
2124                 nfs_pageio_reset_write_mds(pgio);
2125 }
2126 EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_write);
2127
2128 void
2129 pnfs_generic_pg_cleanup(struct nfs_pageio_descriptor *desc)
2130 {
2131         if (desc->pg_lseg) {
2132                 pnfs_put_lseg(desc->pg_lseg);
2133                 desc->pg_lseg = NULL;
2134         }
2135 }
2136 EXPORT_SYMBOL_GPL(pnfs_generic_pg_cleanup);
2137
2138 /*
2139  * Return 0 if @req cannot be coalesced into @pgio, otherwise return the number
2140  * of bytes (maximum @req->wb_bytes) that can be coalesced.
2141  */
2142 size_t
2143 pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio,
2144                      struct nfs_page *prev, struct nfs_page *req)
2145 {
2146         unsigned int size;
2147         u64 seg_end, req_start, seg_left;
2148
2149         size = nfs_generic_pg_test(pgio, prev, req);
2150         if (!size)
2151                 return 0;
2152
2153         /*
2154          * 'size' contains the number of bytes left in the current page (up
2155          * to the original size asked for in @req->wb_bytes).
2156          *
2157          * Calculate how many bytes are left in the layout segment
2158          * and if there are less bytes than 'size', return that instead.
2159          *
2160          * Please also note that 'seg_end' is actually the offset of the
2161          * first byte that lies outside the pnfs_layout_range. FIXME?
2162          *
2163          */
2164         if (pgio->pg_lseg) {
2165                 seg_end = pnfs_end_offset(pgio->pg_lseg->pls_range.offset,
2166                                      pgio->pg_lseg->pls_range.length);
2167                 req_start = req_offset(req);
2168
2169                 /* start of request is past the last byte of this segment */
2170                 if (req_start >= seg_end)
2171                         return 0;
2172
2173                 /* adjust 'size' iff there are fewer bytes left in the
2174                  * segment than what nfs_generic_pg_test returned */
2175                 seg_left = seg_end - req_start;
2176                 if (seg_left < size)
2177                         size = (unsigned int)seg_left;
2178         }
2179
2180         return size;
2181 }
2182 EXPORT_SYMBOL_GPL(pnfs_generic_pg_test);
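
/*
 * Example: with an lseg covering [0, 8192) and a request starting at
 * offset 4096, seg_end is 8192 and seg_left is 4096, so whatever
 * nfs_generic_pg_test() allowed is clamped to at most 4096 bytes.
 */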
2183
2184 int pnfs_write_done_resend_to_mds(struct nfs_pgio_header *hdr)
2185 {
2186         struct nfs_pageio_descriptor pgio;
2187
2188         /* Resend all requests through the MDS */
2189         nfs_pageio_init_write(&pgio, hdr->inode, FLUSH_STABLE, true,
2190                               hdr->completion_ops);
2191         set_bit(NFS_CONTEXT_RESEND_WRITES, &hdr->args.context->flags);
2192         return nfs_pageio_resend(&pgio, hdr);
2193 }
2194 EXPORT_SYMBOL_GPL(pnfs_write_done_resend_to_mds);
2195
2196 static void pnfs_ld_handle_write_error(struct nfs_pgio_header *hdr)
2197 {
2199         dprintk("pnfs write error = %d\n", hdr->pnfs_error);
2200         if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
2201             PNFS_LAYOUTRET_ON_ERROR) {
2202                 pnfs_return_layout(hdr->inode);
2203         }
2204         if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
2205                 hdr->task.tk_status = pnfs_write_done_resend_to_mds(hdr);
2206 }
2207
2208 /*
2209  * Called by non-RPC-based layout drivers
2210  */
2211 void pnfs_ld_write_done(struct nfs_pgio_header *hdr)
2212 {
2213         if (likely(!hdr->pnfs_error)) {
2214                 pnfs_set_layoutcommit(hdr->inode, hdr->lseg,
2215                                 hdr->mds_offset + hdr->res.count);
2216                 hdr->mds_ops->rpc_call_done(&hdr->task, hdr);
2217         }
2218         trace_nfs4_pnfs_write(hdr, hdr->pnfs_error);
2219         if (unlikely(hdr->pnfs_error))
2220                 pnfs_ld_handle_write_error(hdr);
2221         hdr->mds_ops->rpc_release(hdr);
2222 }
2223 EXPORT_SYMBOL_GPL(pnfs_ld_write_done);
2224
2225 static void
2226 pnfs_write_through_mds(struct nfs_pageio_descriptor *desc,
2227                 struct nfs_pgio_header *hdr)
2228 {
2229         struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
2230
2231         if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
2232                 list_splice_tail_init(&hdr->pages, &mirror->pg_list);
2233                 nfs_pageio_reset_write_mds(desc);
2234                 mirror->pg_recoalesce = 1;
2235         }
2236         hdr->release(hdr);
2237 }
2238
2239 static enum pnfs_try_status
2240 pnfs_try_to_write_data(struct nfs_pgio_header *hdr,
2241                         const struct rpc_call_ops *call_ops,
2242                         struct pnfs_layout_segment *lseg,
2243                         int how)
2244 {
2245         struct inode *inode = hdr->inode;
2246         enum pnfs_try_status trypnfs;
2247         struct nfs_server *nfss = NFS_SERVER(inode);
2248
2249         hdr->mds_ops = call_ops;
2250
2251         dprintk("%s: Writing ino:%lu %u@%llu (how %d)\n", __func__,
2252                 inode->i_ino, hdr->args.count, hdr->args.offset, how);
2253         trypnfs = nfss->pnfs_curr_ld->write_pagelist(hdr, how);
2254         if (trypnfs != PNFS_NOT_ATTEMPTED)
2255                 nfs_inc_stats(inode, NFSIOS_PNFS_WRITE);
2256         dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
2257         return trypnfs;
2258 }
2259
2260 static void
2261 pnfs_do_write(struct nfs_pageio_descriptor *desc,
2262               struct nfs_pgio_header *hdr, int how)
2263 {
2264         const struct rpc_call_ops *call_ops = desc->pg_rpc_callops;
2265         struct pnfs_layout_segment *lseg = desc->pg_lseg;
2266         enum pnfs_try_status trypnfs;
2267
2268         trypnfs = pnfs_try_to_write_data(hdr, call_ops, lseg, how);
2269         switch (trypnfs) {
2270         case PNFS_NOT_ATTEMPTED:
2271                 pnfs_write_through_mds(desc, hdr);
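		/* Fallthrough */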
2272         case PNFS_ATTEMPTED:
2273                 break;
2274         case PNFS_TRY_AGAIN:
2275                 /* cleanup hdr and prepare to redo pnfs */
2276                 if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
2277                         struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
2278                         list_splice_init(&hdr->pages, &mirror->pg_list);
2279                         mirror->pg_recoalesce = 1;
2280                 }
2281                 hdr->mds_ops->rpc_release(hdr);
2282         }
2283 }
2284
2285 static void pnfs_writehdr_free(struct nfs_pgio_header *hdr)
2286 {
2287         pnfs_put_lseg(hdr->lseg);
2288         nfs_pgio_header_free(hdr);
2289 }
2290
2291 int
2292 pnfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc)
2293 {
2294         struct nfs_pgio_header *hdr;
2295         int ret;
2296
2297         hdr = nfs_pgio_header_alloc(desc->pg_rw_ops);
2298         if (!hdr) {
2299                 desc->pg_error = -ENOMEM;
2300                 return desc->pg_error;
2301         }
2302         nfs_pgheader_init(desc, hdr, pnfs_writehdr_free);
2303
2304         hdr->lseg = pnfs_get_lseg(desc->pg_lseg);
2305         ret = nfs_generic_pgio(desc, hdr);
2306         if (!ret)
2307                 pnfs_do_write(desc, hdr, desc->pg_ioflags);
2308
2309         return ret;
2310 }
2311 EXPORT_SYMBOL_GPL(pnfs_generic_pg_writepages);
2312
2313 int pnfs_read_done_resend_to_mds(struct nfs_pgio_header *hdr)
2314 {
2315         struct nfs_pageio_descriptor pgio;
2316
2317         /* Resend all requests through the MDS */
2318         nfs_pageio_init_read(&pgio, hdr->inode, true, hdr->completion_ops);
2319         return nfs_pageio_resend(&pgio, hdr);
2320 }
2321 EXPORT_SYMBOL_GPL(pnfs_read_done_resend_to_mds);
2322
2323 static void pnfs_ld_handle_read_error(struct nfs_pgio_header *hdr)
2324 {
2325         dprintk("pnfs read error = %d\n", hdr->pnfs_error);
2326         if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
2327             PNFS_LAYOUTRET_ON_ERROR) {
2328                 pnfs_return_layout(hdr->inode);
2329         }
2330         if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
2331                 hdr->task.tk_status = pnfs_read_done_resend_to_mds(hdr);
2332 }
2333
2334 /*
2335  * Called by non-RPC-based layout drivers
2336  */
2337 void pnfs_ld_read_done(struct nfs_pgio_header *hdr)
2338 {
2339         if (likely(!hdr->pnfs_error))
2340                 hdr->mds_ops->rpc_call_done(&hdr->task, hdr);
2341         trace_nfs4_pnfs_read(hdr, hdr->pnfs_error);
2342         if (unlikely(hdr->pnfs_error))
2343                 pnfs_ld_handle_read_error(hdr);
2344         hdr->mds_ops->rpc_release(hdr);
2345 }
2346 EXPORT_SYMBOL_GPL(pnfs_ld_read_done);
2347
2348 static void
2349 pnfs_read_through_mds(struct nfs_pageio_descriptor *desc,
2350                 struct nfs_pgio_header *hdr)
2351 {
2352         struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
2353
2354         if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
2355                 list_splice_tail_init(&hdr->pages, &mirror->pg_list);
2356                 nfs_pageio_reset_read_mds(desc);
2357                 mirror->pg_recoalesce = 1;
2358         }
2359         hdr->release(hdr);
2360 }
2361
2362 /*
2363  * Call the appropriate parallel I/O subsystem read function.
2364  */
2365 static enum pnfs_try_status
2366 pnfs_try_to_read_data(struct nfs_pgio_header *hdr,
2367                        const struct rpc_call_ops *call_ops,
2368                        struct pnfs_layout_segment *lseg)
2369 {
2370         struct inode *inode = hdr->inode;
2371         struct nfs_server *nfss = NFS_SERVER(inode);
2372         enum pnfs_try_status trypnfs;
2373
2374         hdr->mds_ops = call_ops;
2375
2376         dprintk("%s: Reading ino:%lu %u@%llu\n",
2377                 __func__, inode->i_ino, hdr->args.count, hdr->args.offset);
2378
2379         trypnfs = nfss->pnfs_curr_ld->read_pagelist(hdr);
2380         if (trypnfs != PNFS_NOT_ATTEMPTED)
2381                 nfs_inc_stats(inode, NFSIOS_PNFS_READ);
2382         dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
2383         return trypnfs;
2384 }
2385
2386 /* Resend all requests through pnfs. */
2387 void pnfs_read_resend_pnfs(struct nfs_pgio_header *hdr)
2388 {
2389         struct nfs_pageio_descriptor pgio;
2390
2391         if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
2392                 /* Prevent deadlocks with layoutreturn! */
2393                 pnfs_put_lseg(hdr->lseg);
2394                 hdr->lseg = NULL;
2395
2396                 nfs_pageio_init_read(&pgio, hdr->inode, false,
2397                                         hdr->completion_ops);
2398                 hdr->task.tk_status = nfs_pageio_resend(&pgio, hdr);
2399         }
2400 }
2401 EXPORT_SYMBOL_GPL(pnfs_read_resend_pnfs);
2402
2403 static void
2404 pnfs_do_read(struct nfs_pageio_descriptor *desc, struct nfs_pgio_header *hdr)
2405 {
2406         const struct rpc_call_ops *call_ops = desc->pg_rpc_callops;
2407         struct pnfs_layout_segment *lseg = desc->pg_lseg;
2408         enum pnfs_try_status trypnfs;
2409
2410         trypnfs = pnfs_try_to_read_data(hdr, call_ops, lseg);
2411         switch (trypnfs) {
2412         case PNFS_NOT_ATTEMPTED:
2413                 pnfs_read_through_mds(desc, hdr);
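		/* Fallthrough */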
2414         case PNFS_ATTEMPTED:
2415                 break;
2416         case PNFS_TRY_AGAIN:
2417                 /* cleanup hdr and prepare to redo pnfs */
2418                 if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
2419                         struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
2420                         list_splice_init(&hdr->pages, &mirror->pg_list);
2421                         mirror->pg_recoalesce = 1;
2422                 }
2423                 hdr->mds_ops->rpc_release(hdr);
2424         }
2425 }
2426
2427 static void pnfs_readhdr_free(struct nfs_pgio_header *hdr)
2428 {
2429         pnfs_put_lseg(hdr->lseg);
2430         nfs_pgio_header_free(hdr);
2431 }
2432
2433 int
2434 pnfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc)
2435 {
2436         struct nfs_pgio_header *hdr;
2437         int ret;
2438
2439         hdr = nfs_pgio_header_alloc(desc->pg_rw_ops);
2440         if (!hdr) {
2441                 desc->pg_error = -ENOMEM;
2442                 return desc->pg_error;
2443         }
2444         nfs_pgheader_init(desc, hdr, pnfs_readhdr_free);
2445         hdr->lseg = pnfs_get_lseg(desc->pg_lseg);
2446         ret = nfs_generic_pgio(desc, hdr);
2447         if (!ret)
2448                 pnfs_do_read(desc, hdr);
2449         return ret;
2450 }
2451 EXPORT_SYMBOL_GPL(pnfs_generic_pg_readpages);
2452
2453 static void pnfs_clear_layoutcommitting(struct inode *inode)
2454 {
2455         unsigned long *bitlock = &NFS_I(inode)->flags;
2456
2457         clear_bit_unlock(NFS_INO_LAYOUTCOMMITTING, bitlock);
2458         smp_mb__after_atomic();
2459         wake_up_bit(bitlock, NFS_INO_LAYOUTCOMMITTING);
2460 }
2461
2462 /*
2463  * There can be multiple RW segments.
2464  */
2465 static void pnfs_list_write_lseg(struct inode *inode, struct list_head *listp)
2466 {
2467         struct pnfs_layout_segment *lseg;
2468
2469         list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list) {
2470                 if (lseg->pls_range.iomode == IOMODE_RW &&
2471                     test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
2472                         list_add(&lseg->pls_lc_list, listp);
2473         }
2474 }
2475
2476 static void pnfs_list_write_lseg_done(struct inode *inode, struct list_head *listp)
2477 {
2478         struct pnfs_layout_segment *lseg, *tmp;
2479
2480         /* Matched by references in pnfs_set_layoutcommit */
2481         list_for_each_entry_safe(lseg, tmp, listp, pls_lc_list) {
2482                 list_del_init(&lseg->pls_lc_list);
2483                 pnfs_put_lseg(lseg);
2484         }
2485
2486         pnfs_clear_layoutcommitting(inode);
2487 }
2488
2489 void pnfs_set_lo_fail(struct pnfs_layout_segment *lseg)
2490 {
2491         pnfs_layout_io_set_failed(lseg->pls_layout, lseg->pls_range.iomode);
2492 }
2493 EXPORT_SYMBOL_GPL(pnfs_set_lo_fail);
2494
2495 void
2496 pnfs_set_layoutcommit(struct inode *inode, struct pnfs_layout_segment *lseg,
2497                 loff_t end_pos)
2498 {
2499         struct nfs_inode *nfsi = NFS_I(inode);
2500         bool mark_as_dirty = false;
2501
2502         spin_lock(&inode->i_lock);
2503         if (!test_and_set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
2504                 nfsi->layout->plh_lwb = end_pos;
2505                 mark_as_dirty = true;
2506                 dprintk("%s: Set layoutcommit for inode %lu ",
2507                         __func__, inode->i_ino);
2508         } else if (end_pos > nfsi->layout->plh_lwb)
2509                 nfsi->layout->plh_lwb = end_pos;
2510         if (!test_and_set_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags)) {
2511                 /* references matched in nfs4_layoutcommit_release */
2512                 pnfs_get_lseg(lseg);
2513         }
2514         spin_unlock(&inode->i_lock);
2515         dprintk("%s: lseg %p end_pos %llu\n",
2516                 __func__, lseg, nfsi->layout->plh_lwb);
2517
2518         /* if pnfs_layoutcommit_inode() runs after the i_lock is dropped above,
2519          * the next call will be a no-op because NFS_INO_LAYOUTCOMMIT is not set */
2520         if (mark_as_dirty)
2521                 mark_inode_dirty_sync(inode);
2522 }
2523 EXPORT_SYMBOL_GPL(pnfs_set_layoutcommit);
2524
2525 void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data)
2526 {
2527         struct nfs_server *nfss = NFS_SERVER(data->args.inode);
2528
2529         if (nfss->pnfs_curr_ld->cleanup_layoutcommit)
2530                 nfss->pnfs_curr_ld->cleanup_layoutcommit(data);
2531         pnfs_list_write_lseg_done(data->args.inode, &data->lseg_list);
2532 }
2533
2534 /*
2535  * For the LAYOUT4_NFSV4_1_FILES layout type, NFS_DATA_SYNC WRITEs and
2536  * NFS_UNSTABLE WRITEs with a COMMIT to data servers must store enough
2537  * data to disk to allow the server to recover the data if it crashes.
2538  * LAYOUTCOMMIT is only needed when the NFL4_UFLG_COMMIT_THRU_MDS flag
2539  * is off, and a COMMIT is sent to a data server, or
2540  * if WRITEs to a data server return NFS_DATA_SYNC.
2541  */
2542 int
2543 pnfs_layoutcommit_inode(struct inode *inode, bool sync)
2544 {
2545         struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;
2546         struct nfs4_layoutcommit_data *data;
2547         struct nfs_inode *nfsi = NFS_I(inode);
2548         loff_t end_pos;
2549         int status;
2550
2551         if (!pnfs_layoutcommit_outstanding(inode))
2552                 return 0;
2553
2554         dprintk("--> %s inode %lu\n", __func__, inode->i_ino);
2555
2556         status = -EAGAIN;
2557         if (test_and_set_bit(NFS_INO_LAYOUTCOMMITTING, &nfsi->flags)) {
2558                 if (!sync)
2559                         goto out;
2560                 status = wait_on_bit_lock_action(&nfsi->flags,
2561                                 NFS_INO_LAYOUTCOMMITTING,
2562                                 nfs_wait_bit_killable,
2563                                 TASK_KILLABLE);
2564                 if (status)
2565                         goto out;
2566         }
2567
2568         status = -ENOMEM;
2569         /* Note kzalloc ensures data->res.seq_res.sr_slot == NULL */
2570         data = kzalloc(sizeof(*data), GFP_NOFS);
2571         if (!data)
2572                 goto clear_layoutcommitting;
2573
2574         status = 0;
2575         spin_lock(&inode->i_lock);
2576         if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
2577                 goto out_unlock;
2578
2579         INIT_LIST_HEAD(&data->lseg_list);
2580         pnfs_list_write_lseg(inode, &data->lseg_list);
2581
2582         end_pos = nfsi->layout->plh_lwb;
2583
2584         nfs4_stateid_copy(&data->args.stateid, &nfsi->layout->plh_stateid);
2585         spin_unlock(&inode->i_lock);
2586
2587         data->args.inode = inode;
2588         data->cred = get_rpccred(nfsi->layout->plh_lc_cred);
2589         nfs_fattr_init(&data->fattr);
2590         data->args.bitmask = NFS_SERVER(inode)->cache_consistency_bitmask;
2591         data->res.fattr = &data->fattr;
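	/*
	 * lastbytewritten is the inclusive offset of the highest byte
	 * written, hence end_pos - 1; an end_pos of 0 would underflow,
	 * so it is mapped to U64_MAX instead.
	 */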
2592         if (end_pos != 0)
2593                 data->args.lastbytewritten = end_pos - 1;
2594         else
2595                 data->args.lastbytewritten = U64_MAX;
2596         data->res.server = NFS_SERVER(inode);
2597
2598         if (ld->prepare_layoutcommit) {
2599                 status = ld->prepare_layoutcommit(&data->args);
2600                 if (status) {
2601                         put_rpccred(data->cred);
2602                         spin_lock(&inode->i_lock);
2603                         set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags);
2604                         if (end_pos > nfsi->layout->plh_lwb)
2605                                 nfsi->layout->plh_lwb = end_pos;
2606                         goto out_unlock;
2607                 }
2608         }
2609
2611         status = nfs4_proc_layoutcommit(data, sync);
2612 out:
2613         if (status)
2614                 mark_inode_dirty_sync(inode);
2615         dprintk("<-- %s status %d\n", __func__, status);
2616         return status;
2617 out_unlock:
2618         spin_unlock(&inode->i_lock);
2619         kfree(data);
2620 clear_layoutcommitting:
2621         pnfs_clear_layoutcommitting(inode);
2622         goto out;
2623 }
2624 EXPORT_SYMBOL_GPL(pnfs_layoutcommit_inode);
2625
2626 int
2627 pnfs_generic_sync(struct inode *inode, bool datasync)
2628 {
2629         return pnfs_layoutcommit_inode(inode, true);
2630 }
2631 EXPORT_SYMBOL_GPL(pnfs_generic_sync);
2632
2633 struct nfs4_threshold *pnfs_mdsthreshold_alloc(void)
2634 {
2635         struct nfs4_threshold *thp;
2636
2637         thp = kzalloc(sizeof(*thp), GFP_NOFS);
2638         if (!thp) {
2639                 dprintk("%s mdsthreshold allocation failed\n", __func__);
2640                 return NULL;
2641         }
2642         return thp;
2643 }
2644
2645 #if IS_ENABLED(CONFIG_NFS_V4_2)
2646 int
2647 pnfs_report_layoutstat(struct inode *inode, gfp_t gfp_flags)
2648 {
2649         struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;
2650         struct nfs_server *server = NFS_SERVER(inode);
2651         struct nfs_inode *nfsi = NFS_I(inode);
2652         struct nfs42_layoutstat_data *data;
2653         struct pnfs_layout_hdr *hdr;
2654         int status = 0;
2655
2656         if (!pnfs_enabled_sb(server) || !ld->prepare_layoutstats)
2657                 goto out;
2658
2659         if (!nfs_server_capable(inode, NFS_CAP_LAYOUTSTATS))
2660                 goto out;
2661
2662         if (test_and_set_bit(NFS_INO_LAYOUTSTATS, &nfsi->flags))
2663                 goto out;
2664
2665         spin_lock(&inode->i_lock);
2666         if (!NFS_I(inode)->layout) {
2667                 spin_unlock(&inode->i_lock);
2668                 goto out_clear_layoutstats;
2669         }
2670         hdr = NFS_I(inode)->layout;
2671         pnfs_get_layout_hdr(hdr);
2672         spin_unlock(&inode->i_lock);
2673
2674         data = kzalloc(sizeof(*data), gfp_flags);
2675         if (!data) {
2676                 status = -ENOMEM;
2677                 goto out_put;
2678         }
2679
2680         data->args.fh = NFS_FH(inode);
2681         data->args.inode = inode;
2682         status = ld->prepare_layoutstats(&data->args);
2683         if (status)
2684                 goto out_free;
2685
2686         status = nfs42_proc_layoutstats_generic(NFS_SERVER(inode), data);
2687
2688 out:
2689         dprintk("%s returns %d\n", __func__, status);
2690         return status;
2691
2692 out_free:
2693         kfree(data);
2694 out_put:
2695         pnfs_put_layout_hdr(hdr);
2696 out_clear_layoutstats:
2697         smp_mb__before_atomic();
2698         clear_bit(NFS_INO_LAYOUTSTATS, &nfsi->flags);
2699         smp_mb__after_atomic();
2700         goto out;
2701 }
2702 EXPORT_SYMBOL_GPL(pnfs_report_layoutstat);
2703 #endif
2704
2705 unsigned int layoutstats_timer;
2706 module_param(layoutstats_timer, uint, 0644);
2707 EXPORT_SYMBOL_GPL(layoutstats_timer);