// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/nfs/direct.c
 *
 * Copyright (C) 2003 by Chuck Lever <cel@netapp.com>
 *
 * High-performance uncached I/O for the Linux NFS client
 *
 * There are important applications whose performance or correctness
 * depends on uncached access to file data.  Database clusters
 * (multiple copies of the same instance running on separate hosts)
 * implement their own cache coherency protocol that subsumes file
 * system cache protocols.  Applications that process datasets
 * considerably larger than the client's memory do not always benefit
 * from a local cache.  A streaming video server, for instance, has no
 * need to cache the contents of a file.
 *
 * When an application requests uncached I/O, all read and write requests
 * are made directly to the server; data stored or fetched via these
 * requests is not cached in the Linux page cache.  The client does not
 * correct unaligned requests from applications.  All requested bytes are
 * held on permanent storage before a direct write system call returns to
 * an application.
 *
 * Solaris implements an uncached I/O facility called directio() that
 * is used for backups and sequential I/O to very large files.  Solaris
 * also supports uncaching whole NFS partitions with "-o forcedirectio,"
 * an undocumented mount option.
 *
 * Designed by Jeff Kimmel, Chuck Lever, and Trond Myklebust, with
 * help from Andrew Morton.
 *
 * 18 Dec 2001  Initial implementation for 2.4  --cel
 * 08 Jul 2002  Version for 2.4.19, with bug fixes --trondmy
 * 08 Jun 2003  Port to 2.5 APIs  --cel
 * 31 Mar 2004  Handle direct I/O without VFS support  --cel
 * 15 Sep 2004  Parallel async reads  --cel
 * 04 May 2005  support O_DIRECT with aio  --cel
 *
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/module.h>

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/sunrpc/clnt.h>

#include <linux/uaccess.h>
#include <linux/atomic.h>

#include "internal.h"
#include "iostat.h"
#include "pnfs.h"

#define NFSDBG_FACILITY         NFSDBG_VFS

static struct kmem_cache *nfs_direct_cachep;

/*
 * This represents a set of asynchronous requests that we're waiting on
 */
struct nfs_direct_mirror {
        ssize_t count;
};

struct nfs_direct_req {
        struct kref             kref;           /* release manager */

        /* I/O parameters */
        struct nfs_open_context *ctx;           /* file open context info */
        struct nfs_lock_context *l_ctx;         /* Lock context info */
        struct kiocb *          iocb;           /* controlling i/o request */
        struct inode *          inode;          /* target file of i/o */

        /* completion state */
        atomic_t                io_count;       /* i/os we're waiting for */
        spinlock_t              lock;           /* protect completion state */

        struct nfs_direct_mirror mirrors[NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX];
        int                     mirror_count;

        loff_t                  io_start;       /* Start offset for I/O */
        ssize_t                 count,          /* bytes actually processed */
                                max_count,      /* max expected count */
                                bytes_left,     /* bytes left to be sent */
                                error;          /* any reported error */
        struct completion       completion;     /* wait for i/o completion */

        /* commit state */
        struct nfs_mds_commit_info mds_cinfo;   /* Storage for cinfo */
        struct pnfs_ds_commit_info ds_cinfo;    /* Storage for cinfo */
        struct work_struct      work;
        int                     flags;
        /* for write */
#define NFS_ODIRECT_DO_COMMIT           (1)     /* an unstable reply was received */
#define NFS_ODIRECT_RESCHED_WRITES      (2)     /* write verification failed */
        /* for read */
#define NFS_ODIRECT_SHOULD_DIRTY        (3)     /* dirty user-space page after read */
        struct nfs_writeverf    verf;           /* unstable write verifier */
};

static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops;
static const struct nfs_commit_completion_ops nfs_direct_commit_completion_ops;
static void nfs_direct_write_complete(struct nfs_direct_req *dreq);
static void nfs_direct_write_schedule_work(struct work_struct *work);

static inline void get_dreq(struct nfs_direct_req *dreq)
{
        atomic_inc(&dreq->io_count);
}

static inline int put_dreq(struct nfs_direct_req *dreq)
{
        return atomic_dec_and_test(&dreq->io_count);
}
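
/*
 * A minimal sketch of the completion-gating pattern built on the two
 * helpers above, modeled in portable C11 for illustration only (the
 * demo_* names are not part of the NFS client): the submitter holds one
 * reference while it dispatches, each in-flight I/O holds another, and
 * whichever caller drops the count to zero completes the request.
 */
#if 0	/* illustrative sketch, not built */
#include <stdatomic.h>
#include <stdbool.h>

struct demo_req {
        atomic_int io_count;    /* plays the role of dreq->io_count */
};

static void demo_get(struct demo_req *r)
{
        atomic_fetch_add(&r->io_count, 1);
}

static bool demo_put(struct demo_req *r)
{
        /* true only for the caller that drops the last reference */
        return atomic_fetch_sub(&r->io_count, 1) == 1;
}
#endif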

static void
nfs_direct_good_bytes(struct nfs_direct_req *dreq, struct nfs_pgio_header *hdr)
{
        int i;
        ssize_t count;

        WARN_ON_ONCE(dreq->count >= dreq->max_count);

        if (dreq->mirror_count == 1) {
                dreq->mirrors[hdr->pgio_mirror_idx].count += hdr->good_bytes;
                dreq->count += hdr->good_bytes;
        } else {
                /* mirrored writes */
                count = dreq->mirrors[hdr->pgio_mirror_idx].count;
                if (count + dreq->io_start < hdr->io_start + hdr->good_bytes) {
                        count = hdr->io_start + hdr->good_bytes - dreq->io_start;
                        dreq->mirrors[hdr->pgio_mirror_idx].count = count;
                }
                /* update the dreq->count by finding the minimum agreed count
                 * from all mirrors */
                count = dreq->mirrors[0].count;

                for (i = 1; i < dreq->mirror_count; i++)
                        count = min(count, dreq->mirrors[i].count);

                dreq->count = count;
        }
}
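
/*
 * Worked example for the mirrored case above (illustrative numbers): if
 * mirror 0 has acknowledged 48K and mirror 1 only 32K, the loop settles
 * on dreq->count = 32K, since only bytes confirmed by every mirror may
 * be reported back to the application.
 */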

/*
 * nfs_direct_select_verf - select the right verifier
 * @dreq: direct request possibly spanning multiple servers
 * @ds_clp: nfs_client of data server or NULL if MDS / non-pnfs
 * @commit_idx: commit bucket index for the DS
 *
 * Returns the correct verifier to use given the role of the server
 */
static struct nfs_writeverf *
nfs_direct_select_verf(struct nfs_direct_req *dreq,
                       struct nfs_client *ds_clp,
                       int commit_idx)
{
        struct nfs_writeverf *verfp = &dreq->verf;

#ifdef CONFIG_NFS_V4_1
        /*
         * pNFS is in use: use the DS verf, except when commit_through_mds
         * is set for the layout segment, in which case nbuckets is zero.
         */
        if (ds_clp && dreq->ds_cinfo.nbuckets > 0) {
                if (commit_idx >= 0 && commit_idx < dreq->ds_cinfo.nbuckets)
                        verfp = &dreq->ds_cinfo.buckets[commit_idx].direct_verf;
                else
                        WARN_ON_ONCE(1);
        }
#endif
        return verfp;
}


/*
 * nfs_direct_set_hdr_verf - set the write/commit verifier
 * @dreq: direct request possibly spanning multiple servers
 * @hdr: pageio header to validate against previously seen verfs
 *
 * Set the server's (MDS or DS) "seen" verifier
 */
static void nfs_direct_set_hdr_verf(struct nfs_direct_req *dreq,
                                    struct nfs_pgio_header *hdr)
{
        struct nfs_writeverf *verfp;

        verfp = nfs_direct_select_verf(dreq, hdr->ds_clp, hdr->ds_commit_idx);
        WARN_ON_ONCE(verfp->committed >= 0);
        memcpy(verfp, &hdr->verf, sizeof(struct nfs_writeverf));
        WARN_ON_ONCE(verfp->committed < 0);
}

static int nfs_direct_cmp_verf(const struct nfs_writeverf *v1,
                const struct nfs_writeverf *v2)
{
        return nfs_write_verifier_cmp(&v1->verifier, &v2->verifier);
}

/*
 * nfs_direct_set_or_cmp_hdr_verf - set or compare verifier for pgio header
 * @dreq: direct request possibly spanning multiple servers
 * @hdr: pageio header to validate against previously seen verf
 *
 * Sets the server's "seen" verf if not initialized.
 * Returns the result of comparing @hdr->verf and the "seen"
 * verf of the server used by @hdr (DS or MDS)
 */
static int nfs_direct_set_or_cmp_hdr_verf(struct nfs_direct_req *dreq,
                                          struct nfs_pgio_header *hdr)
{
        struct nfs_writeverf *verfp;

        verfp = nfs_direct_select_verf(dreq, hdr->ds_clp, hdr->ds_commit_idx);
        if (verfp->committed < 0) {
                nfs_direct_set_hdr_verf(dreq, hdr);
                return 0;
        }
        return nfs_direct_cmp_verf(verfp, &hdr->verf);
}

/*
 * nfs_direct_cmp_commit_data_verf - compare verifier for commit data
 * @dreq: direct request possibly spanning multiple servers
 * @data: commit data to validate against previously seen verf
 *
 * Returns the result of comparing @data->verf and the verf of
 * the server used by @data (DS or MDS)
 */
static int nfs_direct_cmp_commit_data_verf(struct nfs_direct_req *dreq,
                                           struct nfs_commit_data *data)
{
        struct nfs_writeverf *verfp;

        verfp = nfs_direct_select_verf(dreq, data->ds_clp,
                                         data->ds_commit_index);

        /* verifier not set, so always fail */
        if (verfp->committed < 0)
                return 1;

        return nfs_direct_cmp_verf(verfp, &data->verf);
}

/**
 * nfs_direct_IO - NFS address space operation for direct I/O
 * @iocb: target I/O control block
 * @iter: I/O buffer
 *
 * The presence of this routine in the address space ops vector means
 * the NFS client supports direct I/O. However, for most direct I/O we
 * shunt off direct read and write requests before the VFS gets them,
 * so this method is only ever called for swap.
 */
ssize_t nfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
        struct inode *inode = iocb->ki_filp->f_mapping->host;

        /* we only support swap files calling nfs_direct_IO */
        if (!IS_SWAPFILE(inode))
                return 0;

        VM_BUG_ON(iov_iter_count(iter) != PAGE_SIZE);

        if (iov_iter_rw(iter) == READ)
                return nfs_file_direct_read(iocb, iter);
        return nfs_file_direct_write(iocb, iter);
}

static void nfs_direct_release_pages(struct page **pages, unsigned int npages)
{
        unsigned int i;

        for (i = 0; i < npages; i++)
                put_page(pages[i]);
}

void nfs_init_cinfo_from_dreq(struct nfs_commit_info *cinfo,
                              struct nfs_direct_req *dreq)
{
        cinfo->inode = dreq->inode;
        cinfo->mds = &dreq->mds_cinfo;
        cinfo->ds = &dreq->ds_cinfo;
        cinfo->dreq = dreq;
        cinfo->completion_ops = &nfs_direct_commit_completion_ops;
}

static inline void nfs_direct_setup_mirroring(struct nfs_direct_req *dreq,
                                             struct nfs_pageio_descriptor *pgio,
                                             struct nfs_page *req)
{
        int mirror_count = 1;

        if (pgio->pg_ops->pg_get_mirror_count)
                mirror_count = pgio->pg_ops->pg_get_mirror_count(pgio, req);

        dreq->mirror_count = mirror_count;
}

static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
{
        struct nfs_direct_req *dreq;

        dreq = kmem_cache_zalloc(nfs_direct_cachep, GFP_KERNEL);
        if (!dreq)
                return NULL;

        kref_init(&dreq->kref);
        kref_get(&dreq->kref);
        init_completion(&dreq->completion);
        INIT_LIST_HEAD(&dreq->mds_cinfo.list);
        dreq->verf.committed = NFS_INVALID_STABLE_HOW;  /* not set yet */
        INIT_WORK(&dreq->work, nfs_direct_write_schedule_work);
        dreq->mirror_count = 1;
        spin_lock_init(&dreq->lock);

        return dreq;
}
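
/*
 * Note that a request leaves nfs_direct_req_alloc() with a reference
 * count of two: kref_init() accounts for the issuing syscall (dropped
 * via nfs_direct_req_release() on its way out) and kref_get() for the
 * completion path (dropped from nfs_direct_complete()).
 */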

static void nfs_direct_req_free(struct kref *kref)
{
        struct nfs_direct_req *dreq = container_of(kref, struct nfs_direct_req, kref);

        nfs_free_pnfs_ds_cinfo(&dreq->ds_cinfo);
        if (dreq->l_ctx != NULL)
                nfs_put_lock_context(dreq->l_ctx);
        if (dreq->ctx != NULL)
                put_nfs_open_context(dreq->ctx);
        kmem_cache_free(nfs_direct_cachep, dreq);
}

static void nfs_direct_req_release(struct nfs_direct_req *dreq)
{
        kref_put(&dreq->kref, nfs_direct_req_free);
}

ssize_t nfs_dreq_bytes_left(struct nfs_direct_req *dreq)
{
        return dreq->bytes_left;
}
EXPORT_SYMBOL_GPL(nfs_dreq_bytes_left);

/*
 * Collects and returns the final error value/byte-count.
 */
static ssize_t nfs_direct_wait(struct nfs_direct_req *dreq)
{
        ssize_t result = -EIOCBQUEUED;

        /* Async requests don't wait here */
        if (dreq->iocb)
                goto out;

        result = wait_for_completion_killable(&dreq->completion);

        if (!result) {
                result = dreq->count;
                WARN_ON_ONCE(dreq->count < 0);
        }
        if (!result)
                result = dreq->error;

out:
        return (ssize_t) result;
}

/*
 * Synchronous I/O uses a stack-allocated iocb.  Thus we can't trust
 * the iocb is still valid here if this is a synchronous request.
 */
static void nfs_direct_complete(struct nfs_direct_req *dreq)
{
        struct inode *inode = dreq->inode;

        inode_dio_end(inode);

        if (dreq->iocb) {
                long res = (long) dreq->error;
                if (dreq->count != 0) {
                        res = (long) dreq->count;
                        WARN_ON_ONCE(dreq->count < 0);
                }
                dreq->iocb->ki_complete(dreq->iocb, res, 0);
        }

        complete(&dreq->completion);

        nfs_direct_req_release(dreq);
}

static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
{
        unsigned long bytes = 0;
        struct nfs_direct_req *dreq = hdr->dreq;

        if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
                goto out_put;

        spin_lock(&dreq->lock);
        if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) && (hdr->good_bytes == 0))
                dreq->error = hdr->error;
        else
                nfs_direct_good_bytes(dreq, hdr);

        spin_unlock(&dreq->lock);

        while (!list_empty(&hdr->pages)) {
                struct nfs_page *req = nfs_list_entry(hdr->pages.next);
                struct page *page = req->wb_page;

                if (!PageCompound(page) && bytes < hdr->good_bytes &&
                    (dreq->flags == NFS_ODIRECT_SHOULD_DIRTY))
                        set_page_dirty(page);
                bytes += req->wb_bytes;
                nfs_list_remove_request(req);
                nfs_release_request(req);
        }
out_put:
        if (put_dreq(dreq))
                nfs_direct_complete(dreq);
        hdr->release(hdr);
}

static void nfs_read_sync_pgio_error(struct list_head *head, int error)
{
        struct nfs_page *req;

        while (!list_empty(head)) {
                req = nfs_list_entry(head->next);
                nfs_list_remove_request(req);
                nfs_release_request(req);
        }
}

static void nfs_direct_pgio_init(struct nfs_pgio_header *hdr)
{
        get_dreq(hdr->dreq);
}

static const struct nfs_pgio_completion_ops nfs_direct_read_completion_ops = {
        .error_cleanup = nfs_read_sync_pgio_error,
        .init_hdr = nfs_direct_pgio_init,
        .completion = nfs_direct_read_completion,
};

/*
 * For each rsize'd chunk of the user's buffer, dispatch an NFS READ
 * operation.  If nfs_readdata_alloc() or get_user_pages() fails,
 * bail and stop sending more reads.  Read length accounting is
 * handled automatically by nfs_direct_read_result().  Otherwise, if
 * no requests have been sent, just return an error.
 */

static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
                                              struct iov_iter *iter,
                                              loff_t pos)
{
        struct nfs_pageio_descriptor desc;
        struct inode *inode = dreq->inode;
        ssize_t result = -EINVAL;
        size_t requested_bytes = 0;
        size_t rsize = max_t(size_t, NFS_SERVER(inode)->rsize, PAGE_SIZE);

        nfs_pageio_init_read(&desc, dreq->inode, false,
                             &nfs_direct_read_completion_ops);
        get_dreq(dreq);
        desc.pg_dreq = dreq;
        inode_dio_begin(inode);

        while (iov_iter_count(iter)) {
                struct page **pagevec;
                size_t bytes;
                size_t pgbase;
                unsigned npages, i;

                result = iov_iter_get_pages_alloc(iter, &pagevec,
                                                  rsize, &pgbase);
                if (result < 0)
                        break;

                bytes = result;
                iov_iter_advance(iter, bytes);
                npages = (result + pgbase + PAGE_SIZE - 1) / PAGE_SIZE;
                for (i = 0; i < npages; i++) {
                        struct nfs_page *req;
                        unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);
                        /* XXX do we need to do the eof zeroing found in async_filler? */
                        req = nfs_create_request(dreq->ctx, pagevec[i],
                                                 pgbase, req_len);
                        if (IS_ERR(req)) {
                                result = PTR_ERR(req);
                                break;
                        }
                        req->wb_index = pos >> PAGE_SHIFT;
                        req->wb_offset = pos & ~PAGE_MASK;
                        if (!nfs_pageio_add_request(&desc, req)) {
                                result = desc.pg_error;
                                nfs_release_request(req);
                                break;
                        }
                        pgbase = 0;
                        bytes -= req_len;
                        requested_bytes += req_len;
                        pos += req_len;
                        dreq->bytes_left -= req_len;
                }
                nfs_direct_release_pages(pagevec, npages);
                kvfree(pagevec);
                if (result < 0)
                        break;
        }

        nfs_pageio_complete(&desc);

        /*
         * If no bytes were started, return the error, and let the
         * generic layer handle the completion.
         */
        if (requested_bytes == 0) {
                inode_dio_end(inode);
                nfs_direct_req_release(dreq);
                return result < 0 ? result : -EIO;
        }

        if (put_dreq(dreq))
                nfs_direct_complete(dreq);
        return requested_bytes;
}
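
/*
 * Worked example for the chunking arithmetic above (illustrative values,
 * 4K pages): a 10000-byte buffer whose user address yields pgbase = 0x200
 * pins npages = (10000 + 0x200 + PAGE_SIZE - 1) / PAGE_SIZE = 3 pages;
 * the first nfs_page covers PAGE_SIZE - 0x200 = 3584 bytes, and the
 * requests for subsequent pages start at pgbase 0.
 */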

/**
 * nfs_file_direct_read - file direct read operation for NFS files
 * @iocb: target I/O control block
 * @iter: vector of user buffers into which to read data
 *
 * We use this function for direct reads instead of calling
 * generic_file_aio_read() in order to avoid gfar's check to see if
 * the request starts before the end of the file.  For that check
 * to work, we must generate a GETATTR before each direct read, and
 * even then there is a window between the GETATTR and the subsequent
 * READ where the file size could change.  Our preference is simply
 * to do all reads the application wants, and the server will take
 * care of managing the end of file boundary.
 *
 * This function also eliminates unnecessarily updating the file's
 * atime locally, as the NFS server sets the file's atime, and this
 * client must read the updated atime from the server back into its
 * cache.
 */
ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter)
{
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
        struct inode *inode = mapping->host;
        struct nfs_direct_req *dreq;
        struct nfs_lock_context *l_ctx;
        ssize_t result = -EINVAL, requested;
        size_t count = iov_iter_count(iter);
        nfs_add_stats(mapping->host, NFSIOS_DIRECTREADBYTES, count);

        dfprintk(FILE, "NFS: direct read(%pD2, %zd@%Ld)\n",
                file, count, (long long) iocb->ki_pos);

        result = 0;
        if (!count)
                goto out;

        task_io_account_read(count);

        result = -ENOMEM;
        dreq = nfs_direct_req_alloc();
        if (dreq == NULL)
                goto out;

        dreq->inode = inode;
        dreq->bytes_left = dreq->max_count = count;
        dreq->io_start = iocb->ki_pos;
        dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
        l_ctx = nfs_get_lock_context(dreq->ctx);
        if (IS_ERR(l_ctx)) {
                result = PTR_ERR(l_ctx);
                goto out_release;
        }
        dreq->l_ctx = l_ctx;
        if (!is_sync_kiocb(iocb))
                dreq->iocb = iocb;

        if (iter_is_iovec(iter))
                dreq->flags = NFS_ODIRECT_SHOULD_DIRTY;

        nfs_start_io_direct(inode);

        NFS_I(inode)->read_io += count;
        requested = nfs_direct_read_schedule_iovec(dreq, iter, iocb->ki_pos);

        nfs_end_io_direct(inode);

        if (requested > 0) {
                result = nfs_direct_wait(dreq);
                if (result > 0) {
                        requested -= result;
                        iocb->ki_pos += result;
                }
                iov_iter_revert(iter, requested);
        } else {
                result = requested;
        }

out_release:
        nfs_direct_req_release(dreq);
out:
        return result;
}

static void
nfs_direct_write_scan_commit_list(struct inode *inode,
                                  struct list_head *list,
                                  struct nfs_commit_info *cinfo)
{
        mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
#ifdef CONFIG_NFS_V4_1
        if (cinfo->ds != NULL && cinfo->ds->nwritten != 0)
                NFS_SERVER(inode)->pnfs_curr_ld->recover_commit_reqs(list, cinfo);
#endif
        nfs_scan_commit_list(&cinfo->mds->list, list, cinfo, 0);
        mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
}

static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
{
        struct nfs_pageio_descriptor desc;
        struct nfs_page *req, *tmp;
        LIST_HEAD(reqs);
        struct nfs_commit_info cinfo;
        LIST_HEAD(failed);
        int i;

        nfs_init_cinfo_from_dreq(&cinfo, dreq);
        nfs_direct_write_scan_commit_list(dreq->inode, &reqs, &cinfo);

        dreq->count = 0;
        dreq->verf.committed = NFS_INVALID_STABLE_HOW;
        nfs_clear_pnfs_ds_commit_verifiers(&dreq->ds_cinfo);
        for (i = 0; i < dreq->mirror_count; i++)
                dreq->mirrors[i].count = 0;
        get_dreq(dreq);

        nfs_pageio_init_write(&desc, dreq->inode, FLUSH_STABLE, false,
                              &nfs_direct_write_completion_ops);
        desc.pg_dreq = dreq;

        req = nfs_list_entry(reqs.next);
        nfs_direct_setup_mirroring(dreq, &desc, req);
        if (desc.pg_error < 0) {
                list_splice_init(&reqs, &failed);
                goto out_failed;
        }

        list_for_each_entry_safe(req, tmp, &reqs, wb_list) {
                /* Bump the transmission count */
                req->wb_nio++;
                if (!nfs_pageio_add_request(&desc, req)) {
                        nfs_list_move_request(req, &failed);
                        spin_lock(&cinfo.inode->i_lock);
                        dreq->flags = 0;
                        if (desc.pg_error < 0)
                                dreq->error = desc.pg_error;
                        else
                                dreq->error = -EIO;
                        spin_unlock(&cinfo.inode->i_lock);
                }
                nfs_release_request(req);
        }
        nfs_pageio_complete(&desc);

out_failed:
        while (!list_empty(&failed)) {
                req = nfs_list_entry(failed.next);
                nfs_list_remove_request(req);
                nfs_unlock_and_release_request(req);
        }

        if (put_dreq(dreq))
                nfs_direct_write_complete(dreq);
}

static void nfs_direct_commit_complete(struct nfs_commit_data *data)
{
        struct nfs_direct_req *dreq = data->dreq;
        struct nfs_commit_info cinfo;
        struct nfs_page *req;
        int status = data->task.tk_status;

        nfs_init_cinfo_from_dreq(&cinfo, dreq);
        if (status < 0 || nfs_direct_cmp_commit_data_verf(dreq, data))
                dreq->flags = NFS_ODIRECT_RESCHED_WRITES;

        while (!list_empty(&data->pages)) {
                req = nfs_list_entry(data->pages.next);
                nfs_list_remove_request(req);
                if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES) {
                        /*
                         * Despite the reboot, the write was successful,
                         * so reset wb_nio.
                         */
                        req->wb_nio = 0;
                        /* Note the rewrite will go through mds */
                        nfs_mark_request_commit(req, NULL, &cinfo, 0);
                } else
                        nfs_release_request(req);
                nfs_unlock_and_release_request(req);
        }

        if (atomic_dec_and_test(&cinfo.mds->rpcs_out))
                nfs_direct_write_complete(dreq);
}

static void nfs_direct_resched_write(struct nfs_commit_info *cinfo,
                struct nfs_page *req)
{
        struct nfs_direct_req *dreq = cinfo->dreq;

        spin_lock(&dreq->lock);
        dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
        spin_unlock(&dreq->lock);
        nfs_mark_request_commit(req, NULL, cinfo, 0);
}

static const struct nfs_commit_completion_ops nfs_direct_commit_completion_ops = {
        .completion = nfs_direct_commit_complete,
        .resched_write = nfs_direct_resched_write,
};

static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
{
        int res;
        struct nfs_commit_info cinfo;
        LIST_HEAD(mds_list);

        nfs_init_cinfo_from_dreq(&cinfo, dreq);
        nfs_scan_commit(dreq->inode, &mds_list, &cinfo);
        res = nfs_generic_commit_list(dreq->inode, &mds_list, 0, &cinfo);
        if (res < 0) /* res == -ENOMEM */
                nfs_direct_write_reschedule(dreq);
}

static void nfs_direct_write_schedule_work(struct work_struct *work)
{
        struct nfs_direct_req *dreq = container_of(work, struct nfs_direct_req, work);
        int flags = dreq->flags;

        dreq->flags = 0;
        switch (flags) {
                case NFS_ODIRECT_DO_COMMIT:
                        nfs_direct_commit_schedule(dreq);
                        break;
                case NFS_ODIRECT_RESCHED_WRITES:
                        nfs_direct_write_reschedule(dreq);
                        break;
                default:
                        nfs_zap_mapping(dreq->inode, dreq->inode->i_mapping);
                        nfs_direct_complete(dreq);
        }
}
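
/*
 * To summarize the deferred states handled above: the default case
 * (stable replies throughout) zaps the page cache mapping and completes
 * immediately; NFS_ODIRECT_DO_COMMIT means at least one unstable reply
 * arrived and a COMMIT must confirm it; NFS_ODIRECT_RESCHED_WRITES
 * means verification failed (e.g. the verifier changed across a server
 * reboot) and the data must be resent, with the rewrite going through
 * the MDS.
 */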

static void nfs_direct_write_complete(struct nfs_direct_req *dreq)
{
        queue_work(nfsiod_workqueue, &dreq->work); /* Calls nfs_direct_write_schedule_work */
}

static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
{
        struct nfs_direct_req *dreq = hdr->dreq;
        struct nfs_commit_info cinfo;
        bool request_commit = false;
        struct nfs_page *req = nfs_list_entry(hdr->pages.next);

        if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
                goto out_put;

        nfs_init_cinfo_from_dreq(&cinfo, dreq);

        spin_lock(&dreq->lock);

        if (test_bit(NFS_IOHDR_ERROR, &hdr->flags))
                dreq->error = hdr->error;
        if (dreq->error == 0) {
                nfs_direct_good_bytes(dreq, hdr);
                if (nfs_write_need_commit(hdr)) {
                        if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES)
                                request_commit = true;
                        else if (dreq->flags == 0) {
                                nfs_direct_set_hdr_verf(dreq, hdr);
                                request_commit = true;
                                dreq->flags = NFS_ODIRECT_DO_COMMIT;
                        } else if (dreq->flags == NFS_ODIRECT_DO_COMMIT) {
                                request_commit = true;
                                if (nfs_direct_set_or_cmp_hdr_verf(dreq, hdr))
                                        dreq->flags =
                                                NFS_ODIRECT_RESCHED_WRITES;
                        }
                }
        }
        spin_unlock(&dreq->lock);

        while (!list_empty(&hdr->pages)) {

                req = nfs_list_entry(hdr->pages.next);
                nfs_list_remove_request(req);
                if (request_commit) {
                        kref_get(&req->wb_kref);
                        nfs_mark_request_commit(req, hdr->lseg, &cinfo,
                                hdr->ds_commit_idx);
                }
                nfs_unlock_and_release_request(req);
        }

out_put:
        if (put_dreq(dreq))
                nfs_direct_write_complete(dreq);
        hdr->release(hdr);
}

static void nfs_write_sync_pgio_error(struct list_head *head, int error)
{
        struct nfs_page *req;

        while (!list_empty(head)) {
                req = nfs_list_entry(head->next);
                nfs_list_remove_request(req);
                nfs_unlock_and_release_request(req);
        }
}

static void nfs_direct_write_reschedule_io(struct nfs_pgio_header *hdr)
{
        struct nfs_direct_req *dreq = hdr->dreq;

        spin_lock(&dreq->lock);
        if (dreq->error == 0) {
                dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
                /* fake unstable write to let common nfs resend pages */
                hdr->verf.committed = NFS_UNSTABLE;
                hdr->good_bytes = hdr->args.count;
        }
        spin_unlock(&dreq->lock);
}

static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops = {
        .error_cleanup = nfs_write_sync_pgio_error,
        .init_hdr = nfs_direct_pgio_init,
        .completion = nfs_direct_write_completion,
        .reschedule_io = nfs_direct_write_reschedule_io,
};


/*
 * NB: Return the value of the first error return code.  Subsequent
 *     errors after the first one are ignored.
 */
/*
 * For each wsize'd chunk of the user's buffer, dispatch an NFS WRITE
 * operation.  If nfs_writedata_alloc() or get_user_pages() fails,
 * bail and stop sending more writes.  Write length accounting is
 * handled automatically by nfs_direct_write_result().  Otherwise, if
 * no requests have been sent, just return an error.
 */
static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
                                               struct iov_iter *iter,
                                               loff_t pos)
{
        struct nfs_pageio_descriptor desc;
        struct inode *inode = dreq->inode;
        ssize_t result = 0;
        size_t requested_bytes = 0;
        size_t wsize = max_t(size_t, NFS_SERVER(inode)->wsize, PAGE_SIZE);

        nfs_pageio_init_write(&desc, inode, FLUSH_COND_STABLE, false,
                              &nfs_direct_write_completion_ops);
        desc.pg_dreq = dreq;
        get_dreq(dreq);
        inode_dio_begin(inode);

        NFS_I(inode)->write_io += iov_iter_count(iter);
        while (iov_iter_count(iter)) {
                struct page **pagevec;
                size_t bytes;
                size_t pgbase;
                unsigned npages, i;

                result = iov_iter_get_pages_alloc(iter, &pagevec,
                                                  wsize, &pgbase);
                if (result < 0)
                        break;

                bytes = result;
                iov_iter_advance(iter, bytes);
                npages = (result + pgbase + PAGE_SIZE - 1) / PAGE_SIZE;
                for (i = 0; i < npages; i++) {
                        struct nfs_page *req;
                        unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);

                        req = nfs_create_request(dreq->ctx, pagevec[i],
                                                 pgbase, req_len);
                        if (IS_ERR(req)) {
                                result = PTR_ERR(req);
                                break;
                        }

                        nfs_direct_setup_mirroring(dreq, &desc, req);
                        if (desc.pg_error < 0) {
                                nfs_free_request(req);
                                result = desc.pg_error;
                                break;
                        }

                        nfs_lock_request(req);
                        req->wb_index = pos >> PAGE_SHIFT;
                        req->wb_offset = pos & ~PAGE_MASK;
                        if (!nfs_pageio_add_request(&desc, req)) {
                                result = desc.pg_error;
                                nfs_unlock_and_release_request(req);
                                break;
                        }
                        pgbase = 0;
                        bytes -= req_len;
                        requested_bytes += req_len;
                        pos += req_len;
                        dreq->bytes_left -= req_len;
                }
                nfs_direct_release_pages(pagevec, npages);
                kvfree(pagevec);
                if (result < 0)
                        break;
        }
        nfs_pageio_complete(&desc);

        /*
         * If no bytes were started, return the error, and let the
         * generic layer handle the completion.
         */
        if (requested_bytes == 0) {
                inode_dio_end(inode);
                nfs_direct_req_release(dreq);
                return result < 0 ? result : -EIO;
        }

        if (put_dreq(dreq))
                nfs_direct_write_complete(dreq);
        return requested_bytes;
}

/**
 * nfs_file_direct_write - file direct write operation for NFS files
 * @iocb: target I/O control block
 * @iter: vector of user buffers from which to write data
 *
 * We use this function for direct writes instead of calling
 * generic_file_aio_write() in order to avoid taking the inode
 * semaphore and updating the i_size.  The NFS server will set
 * the new i_size and this client must read the updated size
 * back into its cache.  We let the server do generic write
 * parameter checking and report problems.
 *
 * We eliminate local atime updates, see direct read above.
 *
 * We avoid unnecessary page cache invalidations for normal cached
 * readers of this file.
 *
 * Note that O_APPEND is not supported for NFS direct writes, as there
 * is no atomic O_APPEND write facility in the NFS protocol.
 */
ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter)
{
        ssize_t result = -EINVAL, requested;
        size_t count;
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
        struct inode *inode = mapping->host;
        struct nfs_direct_req *dreq;
        struct nfs_lock_context *l_ctx;
        loff_t pos, end;

        dfprintk(FILE, "NFS: direct write(%pD2, %zd@%Ld)\n",
                file, iov_iter_count(iter), (long long) iocb->ki_pos);

        result = generic_write_checks(iocb, iter);
        if (result <= 0)
                return result;
        count = result;
        nfs_add_stats(mapping->host, NFSIOS_DIRECTWRITTENBYTES, count);

        pos = iocb->ki_pos;
        end = (pos + iov_iter_count(iter) - 1) >> PAGE_SHIFT;

        task_io_account_write(count);

        result = -ENOMEM;
        dreq = nfs_direct_req_alloc();
        if (!dreq)
                goto out;

        dreq->inode = inode;
        dreq->bytes_left = dreq->max_count = count;
        dreq->io_start = pos;
        dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
        l_ctx = nfs_get_lock_context(dreq->ctx);
        if (IS_ERR(l_ctx)) {
                result = PTR_ERR(l_ctx);
                goto out_release;
        }
        dreq->l_ctx = l_ctx;
        if (!is_sync_kiocb(iocb))
                dreq->iocb = iocb;

        nfs_start_io_direct(inode);

        requested = nfs_direct_write_schedule_iovec(dreq, iter, pos);

        if (mapping->nrpages) {
                invalidate_inode_pages2_range(mapping,
                                              pos >> PAGE_SHIFT, end);
        }

        nfs_end_io_direct(inode);

        if (requested > 0) {
                result = nfs_direct_wait(dreq);
                if (result > 0) {
                        requested -= result;
                        iocb->ki_pos = pos + result;
                        /* XXX: should check the generic_write_sync retval */
                        generic_write_sync(iocb, result);
                }
                iov_iter_revert(iter, requested);
        } else {
                result = requested;
        }
out_release:
        nfs_direct_req_release(dreq);
out:
        return result;
}
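
/*
 * A minimal userspace sketch of exercising the path above (assumptions:
 * an NFS mount at /mnt/nfs, a 4K page size, and illustrative path and
 * sizes; error reporting trimmed for brevity):
 */
#if 0	/* illustrative sketch, not built */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

static int demo_direct_write(void)
{
        void *buf;
        ssize_t n;
        int fd = open("/mnt/nfs/data", O_WRONLY | O_CREAT | O_DIRECT, 0644);

        if (fd < 0)
                return -1;
        /* this client does not fix up unaligned requests, so align
         * buffer, offset, and length */
        if (posix_memalign(&buf, 4096, 4096)) {
                close(fd);
                return -1;
        }
        memset(buf, 'x', 4096);
        /* reaches nfs_file_direct_write(); the data is on permanent
         * storage by the time this returns */
        n = pwrite(fd, buf, 4096, 0);
        free(buf);
        close(fd);
        return n == 4096 ? 0 : -1;
}
#endif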

/**
 * nfs_init_directcache - create a slab cache for nfs_direct_req structures
 *
 */
int __init nfs_init_directcache(void)
{
        nfs_direct_cachep = kmem_cache_create("nfs_direct_cache",
                                                sizeof(struct nfs_direct_req),
                                                0, (SLAB_RECLAIM_ACCOUNT|
                                                        SLAB_MEM_SPREAD),
                                                NULL);
        if (nfs_direct_cachep == NULL)
                return -ENOMEM;

        return 0;
}

/**
 * nfs_destroy_directcache - destroy the slab cache for nfs_direct_req structures
 *
 */
void nfs_destroy_directcache(void)
{
        kmem_cache_destroy(nfs_direct_cachep);
}