NFS: move rw_mode to nfs_pageio_header
/*
 * linux/fs/nfs/read.c
 *
 * Block I/O for NFS
 *
 * Partial copy of Linus' read cache modifications to fs/nfs/file.c
 * modified for async RPC by okir@monad.swb.de
 */

#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/module.h>

#include "nfs4_fs.h"
#include "internal.h"
#include "iostat.h"
#include "fscache.h"
#include "pnfs.h"

#define NFSDBG_FACILITY         NFSDBG_PAGECACHE

static const struct nfs_pgio_completion_ops nfs_async_read_completion_ops;
static const struct nfs_rw_ops nfs_rw_read_ops;

static struct kmem_cache *nfs_rdata_cachep;

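/*
 * Allocate a zeroed I/O header from the read slab. Per this commit,
 * rw_mode now lives in struct nfs_pgio_header rather than in the pageio
 * descriptor, so tag the header as a read at allocation time.
 */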
static struct nfs_pgio_header *nfs_readhdr_alloc(void)
{
        struct nfs_pgio_header *p = kmem_cache_zalloc(nfs_rdata_cachep, GFP_KERNEL);

        if (p)
                p->rw_mode = FMODE_READ;
        return p;
}

static void nfs_readhdr_free(struct nfs_pgio_header *rhdr)
{
        kmem_cache_free(nfs_rdata_cachep, rhdr);
}

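/*
 * The request covers no valid data (e.g. a read past EOF): zero the
 * whole page, mark it up to date and unlock it without issuing any RPC.
 */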
static int nfs_return_empty_page(struct page *page)
{
        zero_user(page, 0, PAGE_SIZE);
        SetPageUptodate(page);
        unlock_page(page);
        return 0;
}

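/*
 * Set up a pageio descriptor for reads. Prefer the active pNFS layout
 * driver's read ops unless the caller forces I/O through the MDS;
 * request coalescing is bounded by the server's negotiated rsize.
 */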
void nfs_pageio_init_read(struct nfs_pageio_descriptor *pgio,
                          struct inode *inode, bool force_mds,
                          const struct nfs_pgio_completion_ops *compl_ops)
{
        struct nfs_server *server = NFS_SERVER(inode);
        const struct nfs_pageio_ops *pg_ops = &nfs_pgio_rw_ops;

#ifdef CONFIG_NFS_V4_1
        if (server->pnfs_curr_ld && !force_mds)
                pg_ops = server->pnfs_curr_ld->pg_read_ops;
#endif
        nfs_pageio_init(pgio, inode, pg_ops, compl_ops, &nfs_rw_read_ops,
                        server->rsize, 0, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(nfs_pageio_init_read);

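/*
 * Fall back from a pNFS layout to I/O through the MDS: let the layout
 * driver clean up, restore the generic rw ops and re-clamp the single
 * read mirror to the MDS rsize.
 */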
void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio)
{
        struct nfs_pgio_mirror *mirror;

        if (pgio->pg_ops && pgio->pg_ops->pg_cleanup)
                pgio->pg_ops->pg_cleanup(pgio);

        pgio->pg_ops = &nfs_pgio_rw_ops;

        /* read path should never have more than one mirror */
        WARN_ON_ONCE(pgio->pg_mirror_count != 1);

        mirror = &pgio->pg_mirrors[0];
        mirror->pg_bsize = NFS_SERVER(pgio->pg_inode)->rsize;
}
EXPORT_SYMBOL_GPL(nfs_pageio_reset_read_mds);

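/*
 * Drop a read request. When the last subrequest of the page group gets
 * here (PG_UNLOCKPAGE sync bit), push an up-to-date page into fscache
 * and unlock it.
 */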
static void nfs_readpage_release(struct nfs_page *req)
{
        struct inode *inode = d_inode(req->wb_context->dentry);

        dprintk("NFS: read done (%s/%llu %d@%lld)\n", inode->i_sb->s_id,
                (unsigned long long)NFS_FILEID(inode), req->wb_bytes,
                (long long)req_offset(req));

        if (nfs_page_group_sync_on_bit(req, PG_UNLOCKPAGE)) {
                if (PageUptodate(req->wb_page))
                        nfs_readpage_to_fscache(inode, req->wb_page, 0);

                unlock_page(req->wb_page);
        }
        nfs_release_request(req);
}

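/*
 * Read a single page asynchronously: build one nfs_page request for the
 * valid part of the page, zero the tail beyond i_size, and feed the
 * request through a fresh pageio descriptor.
 */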
int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
                       struct page *page)
{
        struct nfs_page *new;
        unsigned int len;
        struct nfs_pageio_descriptor pgio;
        struct nfs_pgio_mirror *pgm;

        len = nfs_page_length(page);
        if (len == 0)
                return nfs_return_empty_page(page);
        new = nfs_create_request(ctx, page, NULL, 0, len);
        if (IS_ERR(new)) {
                unlock_page(page);
                return PTR_ERR(new);
        }
        if (len < PAGE_SIZE)
                zero_user_segment(page, len, PAGE_SIZE);

        nfs_pageio_init_read(&pgio, inode, false,
                             &nfs_async_read_completion_ops);
        if (!nfs_pageio_add_request(&pgio, new)) {
                nfs_list_remove_request(new);
                nfs_readpage_release(new);
        }
        nfs_pageio_complete(&pgio);

        /* It doesn't make sense to do mirrored reads! */
        WARN_ON_ONCE(pgio.pg_mirror_count != 1);

        pgm = &pgio.pg_mirrors[0];
        NFS_I(inode)->read_io += pgm->pg_bytes_written;

        return pgio.pg_error < 0 ? pgio.pg_error : 0;
}

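/* Mark the page up to date once every subrequest in the group is. */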
static void nfs_page_group_set_uptodate(struct nfs_page *req)
{
        if (nfs_page_group_sync_on_bit(req, PG_UPTODATE))
                SetPageUptodate(req->wb_page);
}

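/*
 * Per-header completion: walk the requests, zero any page ranges beyond
 * a short-read EOF, mark fully-read requests up to date and release
 * them. A header flagged NFS_IOHDR_REDO skips all of this and only
 * releases the header, since its requests will be resent.
 */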
static void nfs_read_completion(struct nfs_pgio_header *hdr)
{
        unsigned long bytes = 0;

        if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
                goto out;
        while (!list_empty(&hdr->pages)) {
                struct nfs_page *req = nfs_list_entry(hdr->pages.next);
                struct page *page = req->wb_page;
                unsigned long start = req->wb_pgbase;
                unsigned long end = req->wb_pgbase + req->wb_bytes;

                if (test_bit(NFS_IOHDR_EOF, &hdr->flags)) {
                        /* note: regions of the page not covered by a
                         * request are zeroed in nfs_readpage_async /
                         * readpage_async_filler */
                        if (bytes > hdr->good_bytes) {
                                /* nothing in this request was good, so zero
                                 * the full extent of the request */
                                zero_user_segment(page, start, end);

                        } else if (hdr->good_bytes - bytes < req->wb_bytes) {
                                /* part of this request has good bytes, but
                                 * not all. zero the bad bytes */
                                start += hdr->good_bytes - bytes;
                                WARN_ON(start < req->wb_pgbase);
                                zero_user_segment(page, start, end);
                        }
                }
                bytes += req->wb_bytes;
                if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
                        if (bytes <= hdr->good_bytes)
                                nfs_page_group_set_uptodate(req);
                } else
                        nfs_page_group_set_uptodate(req);
                nfs_list_remove_request(req);
                nfs_readpage_release(req);
        }
out:
        hdr->release(hdr);
}

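/*
 * Final setup before the RPC is sent: swap-backed files need the
 * NFS_RPC_SWAPFLAGS task flags, then hand off to the protocol-specific
 * read_setup.
 */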
static void nfs_initiate_read(struct nfs_pgio_header *hdr,
                              struct rpc_message *msg,
                              const struct nfs_rpc_ops *rpc_ops,
                              struct rpc_task_setup *task_setup_data, int how)
{
        struct inode *inode = hdr->inode;
        int swap_flags = IS_SWAPFILE(inode) ? NFS_RPC_SWAPFLAGS : 0;

        task_setup_data->flags |= swap_flags;
        rpc_ops->read_setup(hdr, msg);
}

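/* Error path for a failed submission: release every queued request. */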
static void nfs_async_read_error(struct list_head *head)
{
        struct nfs_page *req;

        while (!list_empty(head)) {
                req = nfs_list_entry(head->next);
                nfs_list_remove_request(req);
                nfs_readpage_release(req);
        }
}

static const struct nfs_pgio_completion_ops nfs_async_read_completion_ops = {
        .error_cleanup = nfs_async_read_error,
        .completion = nfs_read_completion,
};

/*
 * This is the callback from RPC telling us whether a reply was
 * received or some error occurred (timeout or socket shutdown).
 */
static int nfs_readpage_done(struct rpc_task *task,
                             struct nfs_pgio_header *hdr,
                             struct inode *inode)
{
        int status = NFS_PROTO(inode)->read_done(task, hdr);

        if (status != 0)
                return status;

        nfs_add_stats(inode, NFSIOS_SERVERREADBYTES, hdr->res.count);

        if (task->tk_status == -ESTALE) {
                set_bit(NFS_INO_STALE, &NFS_I(inode)->flags);
                nfs_mark_for_revalidate(inode);
        }
        return 0;
}
242
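/*
 * Handle a short read. No progress at all becomes -EIO; a non-RPC
 * layout driver must retry through the MDS; otherwise advance the
 * arguments past the bytes received and restart the RPC for the rest.
 */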
static void nfs_readpage_retry(struct rpc_task *task,
                               struct nfs_pgio_header *hdr)
{
        struct nfs_pgio_args *argp = &hdr->args;
        struct nfs_pgio_res  *resp = &hdr->res;

        /* This is a short read! */
        nfs_inc_stats(hdr->inode, NFSIOS_SHORTREAD);
        /* Has the server at least made some progress? */
        if (resp->count == 0) {
                nfs_set_pgio_error(hdr, -EIO, argp->offset);
                return;
        }

        /* For non rpc-based layout drivers, retry-through-MDS */
        if (!task->tk_ops) {
                hdr->pnfs_error = -EAGAIN;
                return;
        }

        /* Yes, so retry the read at the end of the hdr */
        hdr->mds_offset += resp->count;
        argp->offset += resp->count;
        argp->pgbase += resp->count;
        argp->count -= resp->count;
        rpc_restart_call_prepare(task);
}

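/*
 * Post-RPC fixup: on EOF, trim good_bytes so pages beyond the server's
 * end of file get zeroed rather than treated as errors; a short read
 * without EOF is retried.
 */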
static void nfs_readpage_result(struct rpc_task *task,
                                struct nfs_pgio_header *hdr)
{
        if (hdr->res.eof) {
                loff_t bound;

                bound = hdr->args.offset + hdr->res.count;
                spin_lock(&hdr->lock);
                if (bound < hdr->io_start + hdr->good_bytes) {
                        set_bit(NFS_IOHDR_EOF, &hdr->flags);
                        clear_bit(NFS_IOHDR_ERROR, &hdr->flags);
                        hdr->good_bytes = bound - hdr->io_start;
                }
                spin_unlock(&hdr->lock);
        } else if (hdr->res.count < hdr->args.count)
                nfs_readpage_retry(task, hdr);
}

/*
 * Read a page over NFS.
 * We read the page synchronously in the following case:
 *  -   The error flag is set for this page. This happens only when a
 *      previous async read operation failed.
 */
int nfs_readpage(struct file *file, struct page *page)
{
        struct nfs_open_context *ctx;
        struct inode *inode = page_file_mapping(page)->host;
        int error;

        dprintk("NFS: nfs_readpage (%p %ld@%lu)\n",
                page, PAGE_SIZE, page_index(page));
        nfs_inc_stats(inode, NFSIOS_VFSREADPAGE);
        nfs_add_stats(inode, NFSIOS_READPAGES, 1);

        /*
         * Try to flush any pending writes to the file.
         *
         * NOTE! Because we own the page lock, there cannot
         * be any new pending writes generated at this point
         * for this page (other pages can be written to).
         */
        error = nfs_wb_page(inode, page);
        if (error)
                goto out_unlock;
        if (PageUptodate(page))
                goto out_unlock;

        error = -ESTALE;
        if (NFS_STALE(inode))
                goto out_unlock;

        if (file == NULL) {
                error = -EBADF;
                ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
                if (ctx == NULL)
                        goto out_unlock;
        } else
                ctx = get_nfs_open_context(nfs_file_open_context(file));

        if (!IS_SYNC(inode)) {
                error = nfs_readpage_from_fscache(ctx, inode, page);
                if (error == 0)
                        goto out;
        }

        error = nfs_readpage_async(ctx, inode, page);

out:
        put_nfs_open_context(ctx);
        return error;
out_unlock:
        unlock_page(page);
        return error;
}

346
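/* Context passed to readpage_async_filler() via read_cache_pages(). */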
struct nfs_readdesc {
        struct nfs_pageio_descriptor *pgio;
        struct nfs_open_context *ctx;
};

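/*
 * read_cache_pages() callback: wrap one page in an nfs_page request,
 * zero the tail past i_size, and queue it on the shared descriptor.
 */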
static int readpage_async_filler(void *data, struct page *page)
{
        struct nfs_readdesc *desc = (struct nfs_readdesc *)data;
        struct nfs_page *new;
        unsigned int len;
        int error;

        len = nfs_page_length(page);
        if (len == 0)
                return nfs_return_empty_page(page);

        new = nfs_create_request(desc->ctx, page, NULL, 0, len);
        if (IS_ERR(new))
                goto out_error;

        if (len < PAGE_SIZE)
                zero_user_segment(page, len, PAGE_SIZE);
        if (!nfs_pageio_add_request(desc->pgio, new)) {
                nfs_list_remove_request(new);
                nfs_readpage_release(new);
                error = desc->pgio->pg_error;
                goto out;
        }
        return 0;
out_error:
        error = PTR_ERR(new);
        unlock_page(page);
out:
        return error;
}

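/*
 * ->readpages() for NFS readahead. Try fscache first; any pages left
 * over are coalesced through a single (never mirrored) pageio
 * descriptor, and the bytes it wrote feed the read iostat counters.
 */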
int nfs_readpages(struct file *filp, struct address_space *mapping,
                struct list_head *pages, unsigned nr_pages)
{
        struct nfs_pageio_descriptor pgio;
        struct nfs_pgio_mirror *pgm;
        struct nfs_readdesc desc = {
                .pgio = &pgio,
        };
        struct inode *inode = mapping->host;
        unsigned long npages;
        int ret = -ESTALE;

        dprintk("NFS: nfs_readpages (%s/%Lu %d)\n",
                        inode->i_sb->s_id,
                        (unsigned long long)NFS_FILEID(inode),
                        nr_pages);
        nfs_inc_stats(inode, NFSIOS_VFSREADPAGES);

        if (NFS_STALE(inode))
                goto out;

        if (filp == NULL) {
                desc.ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
                if (desc.ctx == NULL)
                        return -EBADF;
        } else
                desc.ctx = get_nfs_open_context(nfs_file_open_context(filp));

        /* attempt to read as many of the pages as possible from the cache
         * - this returns -ENOBUFS immediately if the cookie is negative
         */
        ret = nfs_readpages_from_fscache(desc.ctx, inode, mapping,
                                         pages, &nr_pages);
        if (ret == 0)
                goto read_complete; /* all pages were read */

        nfs_pageio_init_read(&pgio, inode, false,
                             &nfs_async_read_completion_ops);

        ret = read_cache_pages(mapping, pages, readpage_async_filler, &desc);
        nfs_pageio_complete(&pgio);

        /* It doesn't make sense to do mirrored reads! */
        WARN_ON_ONCE(pgio.pg_mirror_count != 1);

        pgm = &pgio.pg_mirrors[0];
        NFS_I(inode)->read_io += pgm->pg_bytes_written;
        npages = (pgm->pg_bytes_written + PAGE_SIZE - 1) >> PAGE_SHIFT;
        nfs_add_stats(inode, NFSIOS_READPAGES, npages);
read_complete:
        put_nfs_open_context(desc.ctx);
out:
        return ret;
}

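/* Create and destroy the slab cache backing read nfs_pgio_headers. */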
int __init nfs_init_readpagecache(void)
{
        nfs_rdata_cachep = kmem_cache_create("nfs_read_data",
                                             sizeof(struct nfs_pgio_header),
                                             0, SLAB_HWCACHE_ALIGN,
                                             NULL);
        if (nfs_rdata_cachep == NULL)
                return -ENOMEM;

        return 0;
}

void nfs_destroy_readpagecache(void)
{
        kmem_cache_destroy(nfs_rdata_cachep);
}

static const struct nfs_rw_ops nfs_rw_read_ops = {
        .rw_alloc_header        = nfs_readhdr_alloc,
        .rw_free_header         = nfs_readhdr_free,
        .rw_done                = nfs_readpage_done,
        .rw_result              = nfs_readpage_result,
        .rw_initiate            = nfs_initiate_read,
};