/* handling of writes to regular files and writing back to the server
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "internal.h"

/*
 * mark a page as having been made dirty and thus needing writeback
 */
int afs_set_page_dirty(struct page *page)
{
        _enter("");
        return __set_page_dirty_nobuffers(page);
}

/*
 * partly or wholly fill a page that's under preparation for writing
 */
static int afs_fill_page(struct afs_vnode *vnode, struct key *key,
                         loff_t pos, unsigned int len, struct page *page)
{
        struct afs_read *req;
        int ret;

        _enter(",,%llu", (unsigned long long)pos);

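        /* The afs_read record ends in a flexible array of page pointers, so
         * allocate it with room for the single page to be filled.
         */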
        req = kzalloc(sizeof(struct afs_read) + sizeof(struct page *),
                      GFP_KERNEL);
        if (!req)
                return -ENOMEM;

        atomic_set(&req->usage, 1);
        req->pos = pos;
        req->len = len;
        req->nr_pages = 1;
        req->pages[0] = page;
        get_page(page);

        ret = afs_fetch_data(vnode, key, req);
        afs_put_read(req);
        if (ret < 0) {
                if (ret == -ENOENT) {
                        _debug("got NOENT from server"
                               " - marking file deleted and stale");
                        set_bit(AFS_VNODE_DELETED, &vnode->flags);
                        ret = -ESTALE;
                }
        }

        _leave(" = %d", ret);
        return ret;
}

/*
 * prepare to perform part of a write to a page
 */
int afs_write_begin(struct file *file, struct address_space *mapping,
                    loff_t pos, unsigned len, unsigned flags,
                    struct page **pagep, void **fsdata)
{
        struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
        struct page *page;
        struct key *key = afs_file_key(file);
        unsigned long priv;
        unsigned f, from = pos & (PAGE_SIZE - 1);
        unsigned t, to = from + len;
        pgoff_t index = pos >> PAGE_SHIFT;
        int ret;

        _enter("{%x:%u},{%lx},%u,%u",
               vnode->fid.vid, vnode->fid.vnode, index, from, to);

        /* We want to store information about how much of a page is altered in
         * page->private.
         */
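        /* The region is encoded as (to << AFS_PRIV_SHIFT) | from, so the
         * field must be wide enough to hold two page offsets.
         */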
        BUILD_BUG_ON(PAGE_SIZE > 32768 && sizeof(page->private) < 8);

        page = grab_cache_page_write_begin(mapping, index, flags);
        if (!page)
                return -ENOMEM;

        if (!PageUptodate(page) && len != PAGE_SIZE) {
                ret = afs_fill_page(vnode, key, pos & PAGE_MASK, PAGE_SIZE, page);
                if (ret < 0) {
                        unlock_page(page);
                        put_page(page);
                        _leave(" = %d [prep]", ret);
                        return ret;
                }
                SetPageUptodate(page);
        }

        *pagep = page;

try_again:
        /* See if this page is already partially written in a way that we can
         * merge the new write with.
         */
        t = f = 0;
        if (PagePrivate(page)) {
                priv = page_private(page);
                f = priv & AFS_PRIV_MAX;
                t = priv >> AFS_PRIV_SHIFT;
                ASSERTCMP(f, <=, t);
        }

        if (f != t) {
                if (PageWriteback(page)) {
                        trace_afs_page_dirty(vnode, tracepoint_string("alrdy"),
                                             page->index, priv);
                        goto flush_conflicting_write;
                }
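                /* The two regions must overlap or abut for the writes to be
                 * mergeable into a single dirty span.
                 */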
                if (to < f || from > t)
                        goto flush_conflicting_write;
                if (from < f)
                        f = from;
                if (to > t)
                        t = to;
        } else {
                f = from;
                t = to;
        }

        priv = (unsigned long)t << AFS_PRIV_SHIFT;
        priv |= f;
        trace_afs_page_dirty(vnode, tracepoint_string("begin"),
                             page->index, priv);
        SetPagePrivate(page);
        set_page_private(page, priv);
        _leave(" = 0");
        return 0;

        /* The previous write and this write aren't adjacent or overlapping, so
         * flush the page out.
         */
flush_conflicting_write:
        _debug("flush conflict");
        ret = write_one_page(page);
        if (ret < 0)
                goto error;

        ret = lock_page_killable(page);
        if (ret < 0)
                goto error;
        goto try_again;

error:
        put_page(page);
        _leave(" = %d", ret);
        return ret;
}

/*
 * finalise part of a write to a page
 */
int afs_write_end(struct file *file, struct address_space *mapping,
                  loff_t pos, unsigned len, unsigned copied,
                  struct page *page, void *fsdata)
{
        struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
        struct key *key = afs_file_key(file);
        loff_t i_size, maybe_i_size;
        int ret;

        _enter("{%x:%u},{%lx}",
               vnode->fid.vid, vnode->fid.vnode, page->index);

        maybe_i_size = pos + copied;

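        /* If this write extends the file, update i_size, rechecking under
         * wb_lock so as not to race a concurrent extension.
         */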
        i_size = i_size_read(&vnode->vfs_inode);
        if (maybe_i_size > i_size) {
                spin_lock(&vnode->wb_lock);
                i_size = i_size_read(&vnode->vfs_inode);
                if (maybe_i_size > i_size)
                        i_size_write(&vnode->vfs_inode, maybe_i_size);
                spin_unlock(&vnode->wb_lock);
        }

        if (!PageUptodate(page)) {
                if (copied < len) {
                        /* Try and load any missing data from the server.  The
                         * unmarshalling routine will take care of clearing any
                         * bits that are beyond the EOF.
                         */
                        ret = afs_fill_page(vnode, key, pos + copied,
                                            len - copied, page);
                        if (ret < 0)
                                goto out;
                }
                SetPageUptodate(page);
        }

        set_page_dirty(page);
        if (PageDirty(page))
                _debug("dirtied");
        ret = copied;

out:
        unlock_page(page);
        put_page(page);
        return ret;
}

/*
 * kill all the pages in the given range
 */
static void afs_kill_pages(struct address_space *mapping,
                           pgoff_t first, pgoff_t last)
{
        struct afs_vnode *vnode = AFS_FS_I(mapping->host);
        struct pagevec pv;
        unsigned count, loop;

        _enter("{%x:%u},%lx-%lx",
               vnode->fid.vid, vnode->fid.vnode, first, last);

        pagevec_init(&pv);

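        /* Work through the range a pagevec-full at a time, marking each page
         * errored and no longer uptodate, completing its writeback and then
         * evicting it from the pagecache.
         */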
        do {
                _debug("kill %lx-%lx", first, last);

                count = last - first + 1;
                if (count > PAGEVEC_SIZE)
                        count = PAGEVEC_SIZE;
                pv.nr = find_get_pages_contig(mapping, first, count, pv.pages);
                ASSERTCMP(pv.nr, ==, count);

                for (loop = 0; loop < count; loop++) {
                        struct page *page = pv.pages[loop];
                        ClearPageUptodate(page);
                        SetPageError(page);
                        end_page_writeback(page);
                        if (page->index >= first)
                                first = page->index + 1;
                        lock_page(page);
                        generic_error_remove_page(mapping, page);
                }

                __pagevec_release(&pv);
        } while (first <= last);

        _leave("");
}

/*
 * Redirty all the pages in a given range.
 */
static void afs_redirty_pages(struct writeback_control *wbc,
                              struct address_space *mapping,
                              pgoff_t first, pgoff_t last)
{
        struct afs_vnode *vnode = AFS_FS_I(mapping->host);
        struct pagevec pv;
        unsigned count, loop;

        _enter("{%x:%u},%lx-%lx",
               vnode->fid.vid, vnode->fid.vnode, first, last);

        pagevec_init(&pv);

        do {
                _debug("redirty %lx-%lx", first, last);

                count = last - first + 1;
                if (count > PAGEVEC_SIZE)
                        count = PAGEVEC_SIZE;
                pv.nr = find_get_pages_contig(mapping, first, count, pv.pages);
                ASSERTCMP(pv.nr, ==, count);

                for (loop = 0; loop < count; loop++) {
                        struct page *page = pv.pages[loop];

                        redirty_page_for_writepage(wbc, page);
                        end_page_writeback(page);
                        if (page->index >= first)
                                first = page->index + 1;
                }

                __pagevec_release(&pv);
        } while (first <= last);

        _leave("");
}

/*
 * write to a file
 */
static int afs_store_data(struct address_space *mapping,
                          pgoff_t first, pgoff_t last,
                          unsigned offset, unsigned to)
{
        struct afs_vnode *vnode = AFS_FS_I(mapping->host);
        struct afs_fs_cursor fc;
        struct afs_wb_key *wbk = NULL;
        struct list_head *p;
        int ret = -ENOKEY, ret2;

        _enter("%s{%x:%u.%u},%lx,%lx,%x,%x",
               vnode->volume->name,
               vnode->fid.vid,
               vnode->fid.vnode,
               vnode->fid.unique,
               first, last, offset, to);

        spin_lock(&vnode->wb_lock);
        p = vnode->wb_keys.next;

        /* Iterate through the list looking for a valid key to use. */
try_next_key:
        while (p != &vnode->wb_keys) {
                wbk = list_entry(p, struct afs_wb_key, vnode_link);
                _debug("wbk %u", key_serial(wbk->key));
                ret2 = key_validate(wbk->key);
                if (ret2 == 0)
                        goto found_key;
                if (ret == -ENOKEY)
                        ret = ret2;
                p = p->next;
        }

        spin_unlock(&vnode->wb_lock);
        afs_put_wb_key(wbk);
        _leave(" = %d [no keys]", ret);
        return ret;

found_key:
        refcount_inc(&wbk->usage);
        spin_unlock(&vnode->wb_lock);

        _debug("USE WB KEY %u", key_serial(wbk->key));

        ret = -ERESTARTSYS;
        if (afs_begin_vnode_operation(&fc, vnode, wbk->key)) {
                while (afs_select_fileserver(&fc)) {
                        fc.cb_break = vnode->cb_break + vnode->cb_s_break;
                        afs_fs_store_data(&fc, mapping, first, last, offset, to);
                }

                afs_check_for_remote_deletion(&fc, fc.vnode);
                afs_vnode_commit_status(&fc, vnode, fc.cb_break);
                ret = afs_end_vnode_operation(&fc);
        }

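        /* A permission or key error may just mean this key has gone bad;
         * retry the operation with the next cached writeback key.
         */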
        switch (ret) {
        case -EACCES:
        case -EPERM:
        case -ENOKEY:
        case -EKEYEXPIRED:
        case -EKEYREJECTED:
        case -EKEYREVOKED:
                _debug("next");
                spin_lock(&vnode->wb_lock);
                p = wbk->vnode_link.next;
                afs_put_wb_key(wbk);
                goto try_next_key;
        }

        afs_put_wb_key(wbk);
        _leave(" = %d", ret);
        return ret;
}

/*
 * Synchronously write back the locked page and any subsequent non-locked dirty
 * pages.
 */
static int afs_write_back_from_locked_page(struct address_space *mapping,
                                           struct writeback_control *wbc,
                                           struct page *primary_page,
                                           pgoff_t final_page)
{
        struct afs_vnode *vnode = AFS_FS_I(mapping->host);
        struct page *pages[8], *page;
        unsigned long count, priv;
        unsigned n, offset, to, f, t;
        pgoff_t start, first, last;
        int loop, ret;

        _enter(",%lx", primary_page->index);

        count = 1;
        if (test_set_page_writeback(primary_page))
                BUG();

        /* Find all consecutive lockable dirty pages that have contiguous
         * written regions, stopping when we find a page that is not
         * immediately lockable, is not dirty or is missing, or we reach the
         * end of the range.
         */
        start = primary_page->index;
        priv = page_private(primary_page);
        offset = priv & AFS_PRIV_MAX;
        to = priv >> AFS_PRIV_SHIFT;
        trace_afs_page_dirty(vnode, tracepoint_string("store"),
                             primary_page->index, priv);

        WARN_ON(offset == to);
        if (offset == to)
                trace_afs_page_dirty(vnode, tracepoint_string("WARN"),
                                     primary_page->index, priv);

        if (start >= final_page || to < PAGE_SIZE)
                goto no_more;

        start++;
        do {
                _debug("more %lx [%lx]", start, count);
                n = final_page - start + 1;
                if (n > ARRAY_SIZE(pages))
                        n = ARRAY_SIZE(pages);
                n = find_get_pages_contig(mapping, start, n, pages);
                _debug("fgpc %u", n);
                if (n == 0)
                        goto no_more;
                if (pages[0]->index != start) {
                        do {
                                put_page(pages[--n]);
                        } while (n > 0);
                        goto no_more;
                }

                for (loop = 0; loop < n; loop++) {
                        if (to != PAGE_SIZE)
                                break;
                        page = pages[loop];
                        if (page->index > final_page)
                                break;
                        if (!trylock_page(page))
                                break;
                        if (!PageDirty(page) || PageWriteback(page)) {
                                unlock_page(page);
                                break;
                        }

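                        /* The batch can only be extended if this page's dirty
                         * region starts at offset 0, keeping the store as one
                         * contiguous span of bytes.
                         */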
                        priv = page_private(page);
                        f = priv & AFS_PRIV_MAX;
                        t = priv >> AFS_PRIV_SHIFT;
                        if (f != 0) {
                                unlock_page(page);
                                break;
                        }
                        to = t;

                        trace_afs_page_dirty(vnode, tracepoint_string("store+"),
                                             page->index, priv);

                        if (!clear_page_dirty_for_io(page))
                                BUG();
                        if (test_set_page_writeback(page))
                                BUG();
                        unlock_page(page);
                        put_page(page);
                }
                count += loop;
                if (loop < n) {
                        for (; loop < n; loop++)
                                put_page(pages[loop]);
                        goto no_more;
                }

                start += loop;
        } while (start <= final_page && count < 65536);

no_more:
        /* We now have a contiguous set of dirty pages, each with writeback
         * set; the first page is still locked at this point, but all the rest
         * have been unlocked.
         */
        unlock_page(primary_page);

        first = primary_page->index;
        last = first + count - 1;

        _debug("write back %lx[%u..] to %lx[..%u]", first, offset, last, to);

        ret = afs_store_data(mapping, first, last, offset, to);
        switch (ret) {
        case 0:
                ret = count;
                break;

        default:
                pr_notice("kAFS: Unexpected error from FS.StoreData %d\n", ret);
                /* Fall through */
        case -EACCES:
        case -EPERM:
        case -ENOKEY:
        case -EKEYEXPIRED:
        case -EKEYREJECTED:
        case -EKEYREVOKED:
                afs_redirty_pages(wbc, mapping, first, last);
                mapping_set_error(mapping, ret);
                break;

        case -EDQUOT:
        case -ENOSPC:
                afs_redirty_pages(wbc, mapping, first, last);
                mapping_set_error(mapping, -ENOSPC);
                break;

        case -EROFS:
        case -EIO:
        case -EREMOTEIO:
        case -EFBIG:
        case -ENOENT:
        case -ENOMEDIUM:
        case -ENXIO:
                afs_kill_pages(mapping, first, last);
                mapping_set_error(mapping, ret);
                break;
        }

        _leave(" = %d", ret);
        return ret;
}

/*
 * write a page back to the server
 * - the caller locked the page for us
 */
int afs_writepage(struct page *page, struct writeback_control *wbc)
{
        int ret;

        _enter("{%lx},", page->index);

        ret = afs_write_back_from_locked_page(page->mapping, wbc, page,
                                              wbc->range_end >> PAGE_SHIFT);
        if (ret < 0) {
                _leave(" = %d", ret);
                return 0;
        }

        wbc->nr_to_write -= ret;

        _leave(" = 0");
        return 0;
}

/*
 * write a region of pages back to the server
 */
static int afs_writepages_region(struct address_space *mapping,
                                 struct writeback_control *wbc,
                                 pgoff_t index, pgoff_t end, pgoff_t *_next)
{
        struct page *page;
        int ret, n;

        _enter(",,%lx,%lx,", index, end);

        do {
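                /* Fetch one dirty page at a time; the write-back helper then
                 * expands forwards from it to batch up further dirty pages.
                 */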
                n = find_get_pages_range_tag(mapping, &index, end,
                                        PAGECACHE_TAG_DIRTY, 1, &page);
                if (!n)
                        break;

                _debug("wback %lx", page->index);

                /* at this point we hold neither mapping->tree_lock nor lock on
                 * the page itself: the page may be truncated or invalidated
                 * (changing page->mapping to NULL), or even swizzled back from
                 * swapper_space to tmpfs file mapping
                 */
                ret = lock_page_killable(page);
                if (ret < 0) {
                        put_page(page);
                        _leave(" = %d", ret);
                        return ret;
                }

                if (page->mapping != mapping || !PageDirty(page)) {
                        unlock_page(page);
                        put_page(page);
                        continue;
                }

                if (PageWriteback(page)) {
                        unlock_page(page);
                        if (wbc->sync_mode != WB_SYNC_NONE)
                                wait_on_page_writeback(page);
                        put_page(page);
                        continue;
                }

                if (!clear_page_dirty_for_io(page))
                        BUG();
                ret = afs_write_back_from_locked_page(mapping, wbc, page, end);
                put_page(page);
                if (ret < 0) {
                        _leave(" = %d", ret);
                        return ret;
                }

                wbc->nr_to_write -= ret;

                cond_resched();
        } while (index < end && wbc->nr_to_write > 0);

        *_next = index;
        _leave(" = 0 [%lx]", *_next);
        return 0;
}

/*
 * write some of the pending data back to the server
 */
int afs_writepages(struct address_space *mapping,
                   struct writeback_control *wbc)
{
        pgoff_t start, end, next;
        int ret;

        _enter("");

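        /* In cyclic mode, resume from wherever the last pass stopped and wrap
         * round to cover the start of the file if there's quota left over.
         */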
        if (wbc->range_cyclic) {
                start = mapping->writeback_index;
                end = -1;
                ret = afs_writepages_region(mapping, wbc, start, end, &next);
                if (start > 0 && wbc->nr_to_write > 0 && ret == 0)
                        ret = afs_writepages_region(mapping, wbc, 0, start,
                                                    &next);
                mapping->writeback_index = next;
        } else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
                end = (pgoff_t)(LLONG_MAX >> PAGE_SHIFT);
                ret = afs_writepages_region(mapping, wbc, 0, end, &next);
                if (wbc->nr_to_write > 0)
                        mapping->writeback_index = next;
        } else {
                start = wbc->range_start >> PAGE_SHIFT;
                end = wbc->range_end >> PAGE_SHIFT;
                ret = afs_writepages_region(mapping, wbc, start, end, &next);
        }

        _leave(" = %d", ret);
        return ret;
}

/*
 * completion of write to server
 */
void afs_pages_written_back(struct afs_vnode *vnode, struct afs_call *call)
{
        struct pagevec pv;
        unsigned long priv;
        unsigned count, loop;
        pgoff_t first = call->first, last = call->last;

        _enter("{%x:%u},{%lx-%lx}",
               vnode->fid.vid, vnode->fid.vnode, first, last);

        pagevec_init(&pv);

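        /* Clear the PG_writeback flag and the stored dirty-region details on
         * each page covered by the completed store operation.
         */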
        do {
                _debug("done %lx-%lx", first, last);

                count = last - first + 1;
                if (count > PAGEVEC_SIZE)
                        count = PAGEVEC_SIZE;
                pv.nr = find_get_pages_contig(vnode->vfs_inode.i_mapping,
                                              first, count, pv.pages);
                ASSERTCMP(pv.nr, ==, count);

                for (loop = 0; loop < count; loop++) {
                        priv = page_private(pv.pages[loop]);
                        trace_afs_page_dirty(vnode, tracepoint_string("clear"),
                                             pv.pages[loop]->index, priv);
                        set_page_private(pv.pages[loop], 0);
                        end_page_writeback(pv.pages[loop]);
                }
                first += count;
                __pagevec_release(&pv);
        } while (first <= last);

        afs_prune_wb_keys(vnode);
        _leave("");
}

/*
 * write to an AFS file
 */
ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from)
{
        struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp));
        ssize_t result;
        size_t count = iov_iter_count(from);

        _enter("{%x.%u},{%zu},",
               vnode->fid.vid, vnode->fid.vnode, count);

        if (IS_SWAPFILE(&vnode->vfs_inode)) {
                printk(KERN_INFO
                       "AFS: Attempt to write to active swap file!\n");
                return -EBUSY;
        }

        if (!count)
                return 0;

        result = generic_file_write_iter(iocb, from);

        _leave(" = %zd", result);
        return result;
}

/*
 * flush any dirty pages for this process, and check for write errors.
 * - the return status from this call provides a reliable indication of
 *   whether any write errors occurred for this process.
 */
int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
        struct inode *inode = file_inode(file);
        struct afs_vnode *vnode = AFS_FS_I(inode);

        _enter("{%x:%u},{n=%pD},%d",
               vnode->fid.vid, vnode->fid.vnode, file,
               datasync);

        return file_write_and_wait_range(file, start, end);
}

/*
 * Flush out all outstanding writes on a file opened for writing when it is
 * closed.
 */
int afs_flush(struct file *file, fl_owner_t id)
{
        _enter("");

        if ((file->f_mode & FMODE_WRITE) == 0)
                return 0;

        return vfs_fsync(file, 0);
}

/*
 * notification that a previously read-only page is about to become writable
 * - if it returns an error, the caller will deliver a bus error signal
 */
int afs_page_mkwrite(struct vm_fault *vmf)
{
        struct file *file = vmf->vma->vm_file;
        struct inode *inode = file_inode(file);
        struct afs_vnode *vnode = AFS_FS_I(inode);
        unsigned long priv;

        _enter("{{%x:%u}},{%lx}",
               vnode->fid.vid, vnode->fid.vnode, vmf->page->index);

        sb_start_pagefault(inode->i_sb);

        /* Wait for the page to be written to the cache before we allow it to
         * be modified.  We then assume the entire page will need writing back.
         */
#ifdef CONFIG_AFS_FSCACHE
        fscache_wait_on_page_write(vnode->cache, vmf->page);
#endif

        if (PageWriteback(vmf->page) &&
            wait_on_page_bit_killable(vmf->page, PG_writeback) < 0) {
                sb_end_pagefault(inode->i_sb);
                return VM_FAULT_RETRY;
        }

        if (lock_page_killable(vmf->page) < 0) {
                sb_end_pagefault(inode->i_sb);
                return VM_FAULT_RETRY;
        }

        /* We mustn't change page->private until writeback is complete as that
         * details the portion of the page we need to write back and we might
         * need to redirty the page if there's a problem.
         */
        wait_on_page_writeback(vmf->page);

        priv = (unsigned long)PAGE_SIZE << AFS_PRIV_SHIFT; /* To */
        priv |= 0; /* From */
        trace_afs_page_dirty(vnode, tracepoint_string("mkwrite"),
                             vmf->page->index, priv);
        SetPagePrivate(vmf->page);
        set_page_private(vmf->page, priv);

        sb_end_pagefault(inode->i_sb);
        return VM_FAULT_LOCKED;
}

/*
 * Prune the keys cached for writeback.  The vnode's wb_lock is taken here, so
 * the caller must not already hold it.
 */
void afs_prune_wb_keys(struct afs_vnode *vnode)
{
        LIST_HEAD(graveyard);
        struct afs_wb_key *wbk, *tmp;

        /* Discard unused keys */
        spin_lock(&vnode->wb_lock);

        if (!mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_WRITEBACK) &&
            !mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_DIRTY)) {
                list_for_each_entry_safe(wbk, tmp, &vnode->wb_keys, vnode_link) {
                        if (refcount_read(&wbk->usage) == 1)
                                list_move(&wbk->vnode_link, &graveyard);
                }
        }

        spin_unlock(&vnode->wb_lock);

        while (!list_empty(&graveyard)) {
                wbk = list_entry(graveyard.next, struct afs_wb_key, vnode_link);
                list_del(&wbk->vnode_link);
                afs_put_wb_key(wbk);
        }
}

/*
 * Clean up a page during invalidation.
 */
int afs_launder_page(struct page *page)
{
        struct address_space *mapping = page->mapping;
        struct afs_vnode *vnode = AFS_FS_I(mapping->host);
        unsigned long priv;
        unsigned int f, t;
        int ret = 0;

        _enter("{%lx}", page->index);

        priv = page_private(page);
        if (clear_page_dirty_for_io(page)) {
                f = 0;
                t = PAGE_SIZE;
                if (PagePrivate(page)) {
                        f = priv & AFS_PRIV_MAX;
                        t = priv >> AFS_PRIV_SHIFT;
                }

                trace_afs_page_dirty(vnode, tracepoint_string("launder"),
                                     page->index, priv);
                ret = afs_store_data(mapping, page->index, page->index, f, t);
        }

        trace_afs_page_dirty(vnode, tracepoint_string("laundered"),
                             page->index, priv);
        set_page_private(page, 0);
        ClearPagePrivate(page);

#ifdef CONFIG_AFS_FSCACHE
        if (PageFsCache(page)) {
                fscache_wait_on_page_write(vnode->cache, page);
                fscache_uncache_page(vnode->cache, page);
        }
#endif
        return ret;
}