fs/ceph/file.c
1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/ceph/ceph_debug.h>
3 #include <linux/ceph/striper.h>
4
5 #include <linux/module.h>
6 #include <linux/sched.h>
7 #include <linux/slab.h>
8 #include <linux/file.h>
9 #include <linux/mount.h>
10 #include <linux/namei.h>
11 #include <linux/writeback.h>
12 #include <linux/falloc.h>
13
14 #include "super.h"
15 #include "mds_client.h"
16 #include "cache.h"
17
18 static __le32 ceph_flags_sys2wire(u32 flags)
19 {
20         u32 wire_flags = 0;
21
22         switch (flags & O_ACCMODE) {
23         case O_RDONLY:
24                 wire_flags |= CEPH_O_RDONLY;
25                 break;
26         case O_WRONLY:
27                 wire_flags |= CEPH_O_WRONLY;
28                 break;
29         case O_RDWR:
30                 wire_flags |= CEPH_O_RDWR;
31                 break;
32         }
33
34         flags &= ~O_ACCMODE;
35
36 #define ceph_sys2wire(a) if (flags & a) { wire_flags |= CEPH_##a; flags &= ~a; }
37
38         ceph_sys2wire(O_CREAT);
39         ceph_sys2wire(O_EXCL);
40         ceph_sys2wire(O_TRUNC);
41         ceph_sys2wire(O_DIRECTORY);
42         ceph_sys2wire(O_NOFOLLOW);
43
44 #undef ceph_sys2wire
45
46         if (flags)
47                 dout("unused open flags: %x\n", flags);
48
49         return cpu_to_le32(wire_flags);
50 }
51
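/*
 * Illustrative sketch (hypothetical helper, not called anywhere): how a
 * common open(2) flag combination is translated by ceph_flags_sys2wire()
 * above.
 */
static __le32 __maybe_unused ceph_flags_sys2wire_example(void)
{
        /* O_WRONLY|O_CREAT|O_TRUNC -> CEPH_O_WRONLY|CEPH_O_CREAT|CEPH_O_TRUNC */
        return ceph_flags_sys2wire(O_WRONLY | O_CREAT | O_TRUNC);
}
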
52 /*
53  * Ceph file operations
54  *
55  * Implement basic open/close functionality, and implement
56  * read/write.
57  *
58  * We implement three modes of file I/O:
 59  *  - buffered uses the generic_file_read_iter/generic_perform_write helpers
60  *
61  *  - synchronous is used when there is multi-client read/write
62  *    sharing, avoids the page cache, and synchronously waits for an
63  *    ack from the OSD.
64  *
65  *  - direct io takes the variant of the sync path that references
66  *    user pages directly.
67  *
68  * fsync() flushes and waits on dirty pages, but just queues metadata
69  * for writeback: since the MDS can recover size and mtime there is no
70  * need to wait for MDS acknowledgement.
71  */
72
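/*
 * Hypothetical predicate (added for illustration only) mirroring the test
 * in ceph_read_iter() below: the sync/direct path is taken when the client
 * lacks cache caps, when O_DIRECT was requested, or when the file was
 * flagged for synchronous I/O.
 */
static bool __maybe_unused ceph_wants_sync_io_example(struct kiocb *iocb,
                                                      struct ceph_file_info *fi,
                                                      int got_caps)
{
        return (got_caps & (CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO)) == 0 ||
               (iocb->ki_flags & IOCB_DIRECT) ||
               (fi->flags & CEPH_F_SYNC);
}
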
73 /*
74  * How many pages to get in one call to iov_iter_get_pages().  This
75  * determines the size of the on-stack array used as a buffer.
76  */
77 #define ITER_GET_BVECS_PAGES    64
78
79 static ssize_t __iter_get_bvecs(struct iov_iter *iter, size_t maxsize,
80                                 struct bio_vec *bvecs)
81 {
82         size_t size = 0;
83         int bvec_idx = 0;
84
85         if (maxsize > iov_iter_count(iter))
86                 maxsize = iov_iter_count(iter);
87
88         while (size < maxsize) {
89                 struct page *pages[ITER_GET_BVECS_PAGES];
90                 ssize_t bytes;
91                 size_t start;
92                 int idx = 0;
93
94                 bytes = iov_iter_get_pages(iter, pages, maxsize - size,
95                                            ITER_GET_BVECS_PAGES, &start);
96                 if (bytes < 0)
97                         return size ?: bytes;
98
99                 iov_iter_advance(iter, bytes);
100                 size += bytes;
101
102                 for ( ; bytes; idx++, bvec_idx++) {
103                         struct bio_vec bv = {
104                                 .bv_page = pages[idx],
105                                 .bv_len = min_t(int, bytes, PAGE_SIZE - start),
106                                 .bv_offset = start,
107                         };
108
109                         bvecs[bvec_idx] = bv;
110                         bytes -= bv.bv_len;
111                         start = 0;
112                 }
113         }
114
115         return size;
116 }
117
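/*
 * Packing example for the loop above (illustrative numbers, assuming
 * PAGE_SIZE = 4096): pinning 6000 bytes that begin 1000 bytes into the
 * first page produces two bio_vecs, of 3096 bytes (PAGE_SIZE - 1000,
 * offset 1000) and 2904 bytes (offset 0).
 */
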
118 /*
119  * iov_iter_get_pages() only considers one iov_iter segment, no matter
120  * what maxsize or maxpages are given.  For ITER_BVEC that is a single
121  * page.
122  *
123  * Attempt to get up to @maxsize bytes worth of pages from @iter.
124  * Return the number of bytes in the created bio_vec array, or an error.
125  */
126 static ssize_t iter_get_bvecs_alloc(struct iov_iter *iter, size_t maxsize,
127                                     struct bio_vec **bvecs, int *num_bvecs)
128 {
129         struct bio_vec *bv;
130         size_t orig_count = iov_iter_count(iter);
131         ssize_t bytes;
132         int npages;
133
134         iov_iter_truncate(iter, maxsize);
135         npages = iov_iter_npages(iter, INT_MAX);
136         iov_iter_reexpand(iter, orig_count);
137
138         /*
139          * __iter_get_bvecs() may populate only part of the array -- zero it
140          * out.
141          */
142         bv = kvmalloc_array(npages, sizeof(*bv), GFP_KERNEL | __GFP_ZERO);
143         if (!bv)
144                 return -ENOMEM;
145
146         bytes = __iter_get_bvecs(iter, maxsize, bv);
147         if (bytes < 0) {
148                 /*
149                  * No pages were pinned -- just free the array.
150                  */
151                 kvfree(bv);
152                 return bytes;
153         }
154
155         *bvecs = bv;
156         *num_bvecs = npages;
157         return bytes;
158 }
159
160 static void put_bvecs(struct bio_vec *bvecs, int num_bvecs, bool should_dirty)
161 {
162         int i;
163
164         for (i = 0; i < num_bvecs; i++) {
165                 if (bvecs[i].bv_page) {
166                         if (should_dirty)
167                                 set_page_dirty_lock(bvecs[i].bv_page);
168                         put_page(bvecs[i].bv_page);
169                 }
170         }
171         kvfree(bvecs);
172 }
173
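/*
 * Illustrative usage sketch (hypothetical helper, mirrors the pattern used
 * by ceph_direct_read_write() below): pin up to @maxsize bytes of an
 * iterator into a bio_vec array, hand it to an OSD request, then unpin and
 * free it with put_bvecs().
 */
static ssize_t __maybe_unused iter_bvecs_roundtrip_example(struct iov_iter *iter,
                                                           size_t maxsize)
{
        struct bio_vec *bvecs;
        int num_bvecs;
        ssize_t bytes;

        bytes = iter_get_bvecs_alloc(iter, maxsize, &bvecs, &num_bvecs);
        if (bytes < 0)
                return bytes;

        /* ... attach bvecs/num_bvecs to an OSD request here ... */

        /* drop the page references; false means do not mark the pages dirty */
        put_bvecs(bvecs, num_bvecs, false);
        return bytes;
}
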
174 /*
175  * Prepare an open request.  Preallocate ceph_cap to avoid an
176  * inopportune ENOMEM later.
177  */
178 static struct ceph_mds_request *
179 prepare_open_request(struct super_block *sb, int flags, int create_mode)
180 {
181         struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
182         struct ceph_mds_client *mdsc = fsc->mdsc;
183         struct ceph_mds_request *req;
184         int want_auth = USE_ANY_MDS;
185         int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN;
186
187         if (flags & (O_WRONLY|O_RDWR|O_CREAT|O_TRUNC))
188                 want_auth = USE_AUTH_MDS;
189
190         req = ceph_mdsc_create_request(mdsc, op, want_auth);
191         if (IS_ERR(req))
192                 goto out;
193         req->r_fmode = ceph_flags_to_mode(flags);
194         req->r_args.open.flags = ceph_flags_sys2wire(flags);
195         req->r_args.open.mode = cpu_to_le32(create_mode);
196 out:
197         return req;
198 }
199
200 static int ceph_init_file_info(struct inode *inode, struct file *file,
201                                         int fmode, bool isdir)
202 {
203         struct ceph_file_info *fi;
204
205         dout("%s %p %p 0%o (%s)\n", __func__, inode, file,
206                         inode->i_mode, isdir ? "dir" : "regular");
207         BUG_ON(inode->i_fop->release != ceph_release);
208
209         if (isdir) {
210                 struct ceph_dir_file_info *dfi =
211                         kmem_cache_zalloc(ceph_dir_file_cachep, GFP_KERNEL);
212                 if (!dfi) {
213                         ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
214                         return -ENOMEM;
215                 }
216
217                 file->private_data = dfi;
218                 fi = &dfi->file_info;
219                 dfi->next_offset = 2;
220                 dfi->readdir_cache_idx = -1;
221         } else {
222                 fi = kmem_cache_zalloc(ceph_file_cachep, GFP_KERNEL);
223                 if (!fi) {
224                         ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
225                         return -ENOMEM;
226                 }
227
228                 file->private_data = fi;
229         }
230
231         fi->fmode = fmode;
232         spin_lock_init(&fi->rw_contexts_lock);
233         INIT_LIST_HEAD(&fi->rw_contexts);
234
235         return 0;
236 }
237
238 /*
239  * initialize private struct file data.
240  * if we fail, clean up by dropping fmode reference on the ceph_inode
241  */
242 static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
243 {
244         int ret = 0;
245
246         switch (inode->i_mode & S_IFMT) {
247         case S_IFREG:
248                 ceph_fscache_register_inode_cookie(inode);
249                 ceph_fscache_file_set_cookie(inode, file);
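                /* fall through: regular files share the file_info setup below */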
250         case S_IFDIR:
251                 ret = ceph_init_file_info(inode, file, fmode,
252                                                 S_ISDIR(inode->i_mode));
253                 if (ret)
254                         return ret;
255                 break;
256
257         case S_IFLNK:
258                 dout("init_file %p %p 0%o (symlink)\n", inode, file,
259                      inode->i_mode);
260                 ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
261                 break;
262
263         default:
264                 dout("init_file %p %p 0%o (special)\n", inode, file,
265                      inode->i_mode);
266                 /*
267                  * we need to drop the open ref now, since we don't
268                  * have .release set to ceph_release.
269                  */
270                 ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
271                 BUG_ON(inode->i_fop->release == ceph_release);
272
273                 /* call the proper open fop */
274                 ret = inode->i_fop->open(inode, file);
275         }
276         return ret;
277 }
278
279 /*
280  * try renew caps after session gets killed.
281  */
282 int ceph_renew_caps(struct inode *inode)
283 {
284         struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
285         struct ceph_inode_info *ci = ceph_inode(inode);
286         struct ceph_mds_request *req;
287         int err, flags, wanted;
288
289         spin_lock(&ci->i_ceph_lock);
290         wanted = __ceph_caps_file_wanted(ci);
291         if (__ceph_is_any_real_caps(ci) &&
292             (!(wanted & CEPH_CAP_ANY_WR) || ci->i_auth_cap)) {
293                 int issued = __ceph_caps_issued(ci, NULL);
294                 spin_unlock(&ci->i_ceph_lock);
295                 dout("renew caps %p want %s issued %s updating mds_wanted\n",
296                      inode, ceph_cap_string(wanted), ceph_cap_string(issued));
297                 ceph_check_caps(ci, 0, NULL);
298                 return 0;
299         }
300         spin_unlock(&ci->i_ceph_lock);
301
302         flags = 0;
303         if ((wanted & CEPH_CAP_FILE_RD) && (wanted & CEPH_CAP_FILE_WR))
304                 flags = O_RDWR;
305         else if (wanted & CEPH_CAP_FILE_RD)
306                 flags = O_RDONLY;
307         else if (wanted & CEPH_CAP_FILE_WR)
308                 flags = O_WRONLY;
309 #ifdef O_LAZY
310         if (wanted & CEPH_CAP_FILE_LAZYIO)
311                 flags |= O_LAZY;
312 #endif
313
314         req = prepare_open_request(inode->i_sb, flags, 0);
315         if (IS_ERR(req)) {
316                 err = PTR_ERR(req);
317                 goto out;
318         }
319
320         req->r_inode = inode;
321         ihold(inode);
322         req->r_num_caps = 1;
323         req->r_fmode = -1;
324
325         err = ceph_mdsc_do_request(mdsc, NULL, req);
326         ceph_mdsc_put_request(req);
327 out:
328         dout("renew caps %p open result=%d\n", inode, err);
329         return err < 0 ? err : 0;
330 }
331
332 /*
333  * If we already have the requisite capabilities, we can satisfy
334  * the open request locally (no need to request new caps from the
335  * MDS).  We do, however, need to inform the MDS (asynchronously)
336  * if our wanted caps set expands.
337  */
338 int ceph_open(struct inode *inode, struct file *file)
339 {
340         struct ceph_inode_info *ci = ceph_inode(inode);
341         struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
342         struct ceph_mds_client *mdsc = fsc->mdsc;
343         struct ceph_mds_request *req;
344         struct ceph_file_info *fi = file->private_data;
345         int err;
346         int flags, fmode, wanted;
347
348         if (fi) {
349                 dout("open file %p is already opened\n", file);
350                 return 0;
351         }
352
353         /* filter out O_CREAT|O_EXCL; vfs did that already.  yuck. */
354         flags = file->f_flags & ~(O_CREAT|O_EXCL);
355         if (S_ISDIR(inode->i_mode))
356                 flags = O_DIRECTORY;  /* mds likes to know */
357
358         dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode,
359              ceph_vinop(inode), file, flags, file->f_flags);
360         fmode = ceph_flags_to_mode(flags);
361         wanted = ceph_caps_for_mode(fmode);
362
363         /* snapped files are read-only */
364         if (ceph_snap(inode) != CEPH_NOSNAP && (file->f_mode & FMODE_WRITE))
365                 return -EROFS;
366
367         /* trivially open snapdir */
368         if (ceph_snap(inode) == CEPH_SNAPDIR) {
369                 spin_lock(&ci->i_ceph_lock);
370                 __ceph_get_fmode(ci, fmode);
371                 spin_unlock(&ci->i_ceph_lock);
372                 return ceph_init_file(inode, file, fmode);
373         }
374
375         /*
376          * No need to block if we have caps on the auth MDS (for
377          * write) or any MDS (for read).  Update wanted set
378          * asynchronously.
379          */
380         spin_lock(&ci->i_ceph_lock);
381         if (__ceph_is_any_real_caps(ci) &&
382             (((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) {
383                 int mds_wanted = __ceph_caps_mds_wanted(ci, true);
384                 int issued = __ceph_caps_issued(ci, NULL);
385
386                 dout("open %p fmode %d want %s issued %s using existing\n",
387                      inode, fmode, ceph_cap_string(wanted),
388                      ceph_cap_string(issued));
389                 __ceph_get_fmode(ci, fmode);
390                 spin_unlock(&ci->i_ceph_lock);
391
392                 /* adjust wanted? */
393                 if ((issued & wanted) != wanted &&
394                     (mds_wanted & wanted) != wanted &&
395                     ceph_snap(inode) != CEPH_SNAPDIR)
396                         ceph_check_caps(ci, 0, NULL);
397
398                 return ceph_init_file(inode, file, fmode);
399         } else if (ceph_snap(inode) != CEPH_NOSNAP &&
400                    (ci->i_snap_caps & wanted) == wanted) {
401                 __ceph_get_fmode(ci, fmode);
402                 spin_unlock(&ci->i_ceph_lock);
403                 return ceph_init_file(inode, file, fmode);
404         }
405
406         spin_unlock(&ci->i_ceph_lock);
407
408         dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
409         req = prepare_open_request(inode->i_sb, flags, 0);
410         if (IS_ERR(req)) {
411                 err = PTR_ERR(req);
412                 goto out;
413         }
414         req->r_inode = inode;
415         ihold(inode);
416
417         req->r_num_caps = 1;
418         err = ceph_mdsc_do_request(mdsc, NULL, req);
419         if (!err)
420                 err = ceph_init_file(inode, file, req->r_fmode);
421         ceph_mdsc_put_request(req);
422         dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode));
423 out:
424         return err;
425 }
426
427
428 /*
429  * Do a lookup + open with a single request.  If we get a non-existent
430  * file or symlink, return 1 so the VFS can retry.
431  */
432 int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
433                      struct file *file, unsigned flags, umode_t mode)
434 {
435         struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
436         struct ceph_mds_client *mdsc = fsc->mdsc;
437         struct ceph_mds_request *req;
438         struct dentry *dn;
439         struct ceph_acls_info acls = {};
440         int mask;
441         int err;
442
443         dout("atomic_open %p dentry %p '%pd' %s flags %d mode 0%o\n",
444              dir, dentry, dentry,
445              d_unhashed(dentry) ? "unhashed" : "hashed", flags, mode);
446
447         if (dentry->d_name.len > NAME_MAX)
448                 return -ENAMETOOLONG;
449
450         if (flags & O_CREAT) {
451                 if (ceph_quota_is_max_files_exceeded(dir))
452                         return -EDQUOT;
453                 err = ceph_pre_init_acls(dir, &mode, &acls);
454                 if (err < 0)
455                         return err;
456         }
457
458         /* do the open */
459         req = prepare_open_request(dir->i_sb, flags, mode);
460         if (IS_ERR(req)) {
461                 err = PTR_ERR(req);
462                 goto out_acl;
463         }
464         req->r_dentry = dget(dentry);
465         req->r_num_caps = 2;
466         if (flags & O_CREAT) {
467                 req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL;
468                 req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
469                 if (acls.pagelist) {
470                         req->r_pagelist = acls.pagelist;
471                         acls.pagelist = NULL;
472                 }
473         }
474
475         mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED;
476         if (ceph_security_xattr_wanted(dir))
477                 mask |= CEPH_CAP_XATTR_SHARED;
478         req->r_args.open.mask = cpu_to_le32(mask);
479
480         req->r_parent = dir;
481         set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
482         err = ceph_mdsc_do_request(mdsc,
483                                    (flags & (O_CREAT|O_TRUNC)) ? dir : NULL,
484                                    req);
485         err = ceph_handle_snapdir(req, dentry, err);
486         if (err)
487                 goto out_req;
488
489         if ((flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
490                 err = ceph_handle_notrace_create(dir, dentry);
491
492         if (d_in_lookup(dentry)) {
493                 dn = ceph_finish_lookup(req, dentry, err);
494                 if (IS_ERR(dn))
495                         err = PTR_ERR(dn);
496         } else {
497                 /* we were given a hashed negative dentry */
498                 dn = NULL;
499         }
500         if (err)
501                 goto out_req;
502         if (dn || d_really_is_negative(dentry) || d_is_symlink(dentry)) {
503                 /* make vfs retry on splice, ENOENT, or symlink */
504                 dout("atomic_open finish_no_open on dn %p\n", dn);
505                 err = finish_no_open(file, dn);
506         } else {
507                 dout("atomic_open finish_open on dn %p\n", dn);
508                 if (req->r_op == CEPH_MDS_OP_CREATE && req->r_reply_info.has_create_ino) {
509                         ceph_init_inode_acls(d_inode(dentry), &acls);
510                         file->f_mode |= FMODE_CREATED;
511                 }
512                 err = finish_open(file, dentry, ceph_open);
513         }
514 out_req:
515         if (!req->r_err && req->r_target_inode)
516                 ceph_put_fmode(ceph_inode(req->r_target_inode), req->r_fmode);
517         ceph_mdsc_put_request(req);
518 out_acl:
519         ceph_release_acls_info(&acls);
520         dout("atomic_open result=%d\n", err);
521         return err;
522 }
523
524 int ceph_release(struct inode *inode, struct file *file)
525 {
526         struct ceph_inode_info *ci = ceph_inode(inode);
527
528         if (S_ISDIR(inode->i_mode)) {
529                 struct ceph_dir_file_info *dfi = file->private_data;
530                 dout("release inode %p dir file %p\n", inode, file);
531                 WARN_ON(!list_empty(&dfi->file_info.rw_contexts));
532
533                 ceph_put_fmode(ci, dfi->file_info.fmode);
534
535                 if (dfi->last_readdir)
536                         ceph_mdsc_put_request(dfi->last_readdir);
537                 kfree(dfi->last_name);
538                 kfree(dfi->dir_info);
539                 kmem_cache_free(ceph_dir_file_cachep, dfi);
540         } else {
541                 struct ceph_file_info *fi = file->private_data;
542                 dout("release inode %p regular file %p\n", inode, file);
543                 WARN_ON(!list_empty(&fi->rw_contexts));
544
545                 ceph_put_fmode(ci, fi->fmode);
546                 kmem_cache_free(ceph_file_cachep, fi);
547         }
548
549         /* wake up anyone waiting for caps on this inode */
550         wake_up_all(&ci->i_cap_wq);
551         return 0;
552 }
553
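/*
 * retry_op values shared by ceph_sync_read() and ceph_read_iter():
 * CHECK_EOF means a short read should be re-checked against i_size and
 * possibly retried, READ_INLINE switches to fetching inline data from the
 * MDS, and HAVE_RETRIED records that one retry has already been performed.
 */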
554 enum {
555         HAVE_RETRIED = 1,
556         CHECK_EOF =    2,
557         READ_INLINE =  3,
558 };
559
560 /*
561  * Completely synchronous read and write methods.  Direct from __user
562  * buffer to osd, or directly to user pages (if O_DIRECT).
563  *
564  * If the read spans object boundary, just do multiple reads.  (That's not
565  * atomic, but good enough for now.)
566  *
567  * If we get a short result from the OSD, check against i_size; we need to
568  * only return a short read to the caller if we hit EOF.
569  */
570 static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
571                               int *retry_op)
572 {
573         struct file *file = iocb->ki_filp;
574         struct inode *inode = file_inode(file);
575         struct ceph_inode_info *ci = ceph_inode(inode);
576         struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
577         struct ceph_osd_client *osdc = &fsc->client->osdc;
578         ssize_t ret;
579         u64 off = iocb->ki_pos;
580         u64 len = iov_iter_count(to);
581
582         dout("sync_read on file %p %llu~%u %s\n", file, off, (unsigned)len,
583              (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");
584
585         if (!len)
586                 return 0;
587         /*
588          * flush any page cache pages in this range.  this
589          * will make concurrent normal and sync io slow,
590          * but it will at least behave sensibly when they are
591          * in sequence.
592          */
593         ret = filemap_write_and_wait_range(inode->i_mapping, off, off + len);
594         if (ret < 0)
595                 return ret;
596
597         ret = 0;
598         while ((len = iov_iter_count(to)) > 0) {
599                 struct ceph_osd_request *req;
600                 struct page **pages;
601                 int num_pages;
602                 size_t page_off;
603                 u64 i_size;
604                 bool more;
605
606                 req = ceph_osdc_new_request(osdc, &ci->i_layout,
607                                         ci->i_vino, off, &len, 0, 1,
608                                         CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
609                                         NULL, ci->i_truncate_seq,
610                                         ci->i_truncate_size, false);
611                 if (IS_ERR(req)) {
612                         ret = PTR_ERR(req);
613                         break;
614                 }
615
616                 more = len < iov_iter_count(to);
617
618                 if (unlikely(iov_iter_is_pipe(to))) {
619                         ret = iov_iter_get_pages_alloc(to, &pages, len,
620                                                        &page_off);
621                         if (ret <= 0) {
622                                 ceph_osdc_put_request(req);
623                                 ret = -ENOMEM;
624                                 break;
625                         }
626                         num_pages = DIV_ROUND_UP(ret + page_off, PAGE_SIZE);
627                         if (ret < len) {
628                                 len = ret;
629                                 osd_req_op_extent_update(req, 0, len);
630                                 more = false;
631                         }
632                 } else {
633                         num_pages = calc_pages_for(off, len);
634                         page_off = off & ~PAGE_MASK;
635                         pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
636                         if (IS_ERR(pages)) {
637                                 ceph_osdc_put_request(req);
638                                 ret = PTR_ERR(pages);
639                                 break;
640                         }
641                 }
642
643                 osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_off,
644                                                  false, false);
645                 ret = ceph_osdc_start_request(osdc, req, false);
646                 if (!ret)
647                         ret = ceph_osdc_wait_request(osdc, req);
648                 ceph_osdc_put_request(req);
649
650                 i_size = i_size_read(inode);
651                 dout("sync_read %llu~%llu got %zd i_size %llu%s\n",
652                      off, len, ret, i_size, (more ? " MORE" : ""));
653
654                 if (ret == -ENOENT)
655                         ret = 0;
656                 if (ret >= 0 && ret < len && (off + ret < i_size)) {
657                         int zlen = min(len - ret, i_size - off - ret);
658                         int zoff = page_off + ret;
659                         dout("sync_read zero gap %llu~%llu\n",
660                              off + ret, off + ret + zlen);
661                         ceph_zero_page_vector_range(zoff, zlen, pages);
662                         ret += zlen;
663                 }
664
665                 if (unlikely(iov_iter_is_pipe(to))) {
666                         if (ret > 0) {
667                                 iov_iter_advance(to, ret);
668                                 off += ret;
669                         } else {
670                                 iov_iter_advance(to, 0);
671                         }
672                         ceph_put_page_vector(pages, num_pages, false);
673                 } else {
674                         int idx = 0;
675                         size_t left = ret > 0 ? ret : 0;
676                         while (left > 0) {
677                                 size_t len, copied;
678                                 page_off = off & ~PAGE_MASK;
679                                 len = min_t(size_t, left, PAGE_SIZE - page_off);
680                                 copied = copy_page_to_iter(pages[idx++],
681                                                            page_off, len, to);
682                                 off += copied;
683                                 left -= copied;
684                                 if (copied < len) {
685                                         ret = -EFAULT;
686                                         break;
687                                 }
688                         }
689                         ceph_release_page_vector(pages, num_pages);
690                 }
691
692                 if (ret <= 0 || off >= i_size || !more)
693                         break;
694         }
695
696         if (off > iocb->ki_pos) {
697                 if (ret >= 0 &&
698                     iov_iter_count(to) > 0 && off >= i_size_read(inode))
699                         *retry_op = CHECK_EOF;
700                 ret = off - iocb->ki_pos;
701                 iocb->ki_pos = off;
702         }
703
704         dout("sync_read result %zd retry_op %d\n", ret, *retry_op);
705         return ret;
706 }
707
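/*
 * Worked example of the short-read zero fill in ceph_sync_read() above
 * (illustrative numbers): a 16384-byte read at off=0 returns ret=4096 from
 * the OSD while i_size=12288.  The gap between the returned data and
 * i_size is zlen = min(16384 - 4096, 12288 - 0 - 4096) = 8192 bytes, so
 * the page vector is zeroed from byte 4096 to 12287 and ret becomes 12288.
 */
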
708 struct ceph_aio_request {
709         struct kiocb *iocb;
710         size_t total_len;
711         bool write;
712         bool should_dirty;
713         int error;
714         struct list_head osd_reqs;
715         unsigned num_reqs;
716         atomic_t pending_reqs;
717         struct timespec64 mtime;
718         struct ceph_cap_flush *prealloc_cf;
719 };
720
721 struct ceph_aio_work {
722         struct work_struct work;
723         struct ceph_osd_request *req;
724 };
725
726 static void ceph_aio_retry_work(struct work_struct *work);
727
728 static void ceph_aio_complete(struct inode *inode,
729                               struct ceph_aio_request *aio_req)
730 {
731         struct ceph_inode_info *ci = ceph_inode(inode);
732         int ret;
733
734         if (!atomic_dec_and_test(&aio_req->pending_reqs))
735                 return;
736
737         ret = aio_req->error;
738         if (!ret)
739                 ret = aio_req->total_len;
740
741         dout("ceph_aio_complete %p rc %d\n", inode, ret);
742
743         if (ret >= 0 && aio_req->write) {
744                 int dirty;
745
746                 loff_t endoff = aio_req->iocb->ki_pos + aio_req->total_len;
747                 if (endoff > i_size_read(inode)) {
748                         if (ceph_inode_set_size(inode, endoff))
749                                 ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
750                 }
751
752                 spin_lock(&ci->i_ceph_lock);
753                 ci->i_inline_version = CEPH_INLINE_NONE;
754                 dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
755                                                &aio_req->prealloc_cf);
756                 spin_unlock(&ci->i_ceph_lock);
757                 if (dirty)
758                         __mark_inode_dirty(inode, dirty);
759
760         }
761
762         ceph_put_cap_refs(ci, (aio_req->write ? CEPH_CAP_FILE_WR :
763                                                 CEPH_CAP_FILE_RD));
764
765         aio_req->iocb->ki_complete(aio_req->iocb, ret, 0);
766
767         ceph_free_cap_flush(aio_req->prealloc_cf);
768         kfree(aio_req);
769 }
770
771 static void ceph_aio_complete_req(struct ceph_osd_request *req)
772 {
773         int rc = req->r_result;
774         struct inode *inode = req->r_inode;
775         struct ceph_aio_request *aio_req = req->r_priv;
776         struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0);
777
778         BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_BVECS);
779         BUG_ON(!osd_data->num_bvecs);
780
781         dout("ceph_aio_complete_req %p rc %d bytes %u\n",
782              inode, rc, osd_data->bvec_pos.iter.bi_size);
783
784         if (rc == -EOLDSNAPC) {
785                 struct ceph_aio_work *aio_work;
786                 BUG_ON(!aio_req->write);
787
788                 aio_work = kmalloc(sizeof(*aio_work), GFP_NOFS);
789                 if (aio_work) {
790                         INIT_WORK(&aio_work->work, ceph_aio_retry_work);
791                         aio_work->req = req;
792                         queue_work(ceph_inode_to_client(inode)->wb_wq,
793                                    &aio_work->work);
794                         return;
795                 }
796                 rc = -ENOMEM;
797         } else if (!aio_req->write) {
798                 if (rc == -ENOENT)
799                         rc = 0;
800                 if (rc >= 0 && osd_data->bvec_pos.iter.bi_size > rc) {
801                         struct iov_iter i;
802                         int zlen = osd_data->bvec_pos.iter.bi_size - rc;
803
804                         /*
805                          * If read is satisfied by single OSD request,
806                          * it can pass EOF. Otherwise read is within
807                          * i_size.
808                          */
809                         if (aio_req->num_reqs == 1) {
810                                 loff_t i_size = i_size_read(inode);
811                                 loff_t endoff = aio_req->iocb->ki_pos + rc;
812                                 if (endoff < i_size)
813                                         zlen = min_t(size_t, zlen,
814                                                      i_size - endoff);
815                                 aio_req->total_len = rc + zlen;
816                         }
817
818                         iov_iter_bvec(&i, READ, osd_data->bvec_pos.bvecs,
819                                       osd_data->num_bvecs,
820                                       osd_data->bvec_pos.iter.bi_size);
821                         iov_iter_advance(&i, rc);
822                         iov_iter_zero(zlen, &i);
823                 }
824         }
825
826         put_bvecs(osd_data->bvec_pos.bvecs, osd_data->num_bvecs,
827                   aio_req->should_dirty);
828         ceph_osdc_put_request(req);
829
830         if (rc < 0)
831                 cmpxchg(&aio_req->error, 0, rc);
832
833         ceph_aio_complete(inode, aio_req);
834         return;
835 }
836
837 static void ceph_aio_retry_work(struct work_struct *work)
838 {
839         struct ceph_aio_work *aio_work =
840                 container_of(work, struct ceph_aio_work, work);
841         struct ceph_osd_request *orig_req = aio_work->req;
842         struct ceph_aio_request *aio_req = orig_req->r_priv;
843         struct inode *inode = orig_req->r_inode;
844         struct ceph_inode_info *ci = ceph_inode(inode);
845         struct ceph_snap_context *snapc;
846         struct ceph_osd_request *req;
847         int ret;
848
849         spin_lock(&ci->i_ceph_lock);
850         if (__ceph_have_pending_cap_snap(ci)) {
851                 struct ceph_cap_snap *capsnap =
852                         list_last_entry(&ci->i_cap_snaps,
853                                         struct ceph_cap_snap,
854                                         ci_item);
855                 snapc = ceph_get_snap_context(capsnap->context);
856         } else {
857                 BUG_ON(!ci->i_head_snapc);
858                 snapc = ceph_get_snap_context(ci->i_head_snapc);
859         }
860         spin_unlock(&ci->i_ceph_lock);
861
862         req = ceph_osdc_alloc_request(orig_req->r_osdc, snapc, 1,
863                         false, GFP_NOFS);
864         if (!req) {
865                 ret = -ENOMEM;
866                 req = orig_req;
867                 goto out;
868         }
869
870         req->r_flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
871         ceph_oloc_copy(&req->r_base_oloc, &orig_req->r_base_oloc);
872         ceph_oid_copy(&req->r_base_oid, &orig_req->r_base_oid);
873
874         req->r_ops[0] = orig_req->r_ops[0];
875
876         req->r_mtime = aio_req->mtime;
877         req->r_data_offset = req->r_ops[0].extent.offset;
878
879         ret = ceph_osdc_alloc_messages(req, GFP_NOFS);
880         if (ret) {
881                 ceph_osdc_put_request(req);
882                 req = orig_req;
883                 goto out;
884         }
885
886         ceph_osdc_put_request(orig_req);
887
888         req->r_callback = ceph_aio_complete_req;
889         req->r_inode = inode;
890         req->r_priv = aio_req;
891
892         ret = ceph_osdc_start_request(req->r_osdc, req, false);
893 out:
894         if (ret < 0) {
895                 req->r_result = ret;
896                 ceph_aio_complete_req(req);
897         }
898
899         ceph_put_snap_context(snapc);
900         kfree(aio_work);
901 }
902
903 static ssize_t
904 ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
905                        struct ceph_snap_context *snapc,
906                        struct ceph_cap_flush **pcf)
907 {
908         struct file *file = iocb->ki_filp;
909         struct inode *inode = file_inode(file);
910         struct ceph_inode_info *ci = ceph_inode(inode);
911         struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
912         struct ceph_vino vino;
913         struct ceph_osd_request *req;
914         struct bio_vec *bvecs;
915         struct ceph_aio_request *aio_req = NULL;
916         int num_pages = 0;
917         int flags;
918         int ret;
919         struct timespec64 mtime = current_time(inode);
920         size_t count = iov_iter_count(iter);
921         loff_t pos = iocb->ki_pos;
922         bool write = iov_iter_rw(iter) == WRITE;
923         bool should_dirty = !write && iter_is_iovec(iter);
924
925         if (write && ceph_snap(file_inode(file)) != CEPH_NOSNAP)
926                 return -EROFS;
927
928         dout("sync_direct_%s on file %p %lld~%u snapc %p seq %lld\n",
929              (write ? "write" : "read"), file, pos, (unsigned)count,
930              snapc, snapc->seq);
931
932         ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + count);
933         if (ret < 0)
934                 return ret;
935
936         if (write) {
937                 int ret2 = invalidate_inode_pages2_range(inode->i_mapping,
938                                         pos >> PAGE_SHIFT,
939                                         (pos + count) >> PAGE_SHIFT);
940                 if (ret2 < 0)
941                         dout("invalidate_inode_pages2_range returned %d\n", ret2);
942
943                 flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
944         } else {
945                 flags = CEPH_OSD_FLAG_READ;
946         }
947
948         while (iov_iter_count(iter) > 0) {
949                 u64 size = iov_iter_count(iter);
950                 ssize_t len;
951
952                 if (write)
953                         size = min_t(u64, size, fsc->mount_options->wsize);
954                 else
955                         size = min_t(u64, size, fsc->mount_options->rsize);
956
957                 vino = ceph_vino(inode);
958                 req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
959                                             vino, pos, &size, 0,
960                                             1,
961                                             write ? CEPH_OSD_OP_WRITE :
962                                                     CEPH_OSD_OP_READ,
963                                             flags, snapc,
964                                             ci->i_truncate_seq,
965                                             ci->i_truncate_size,
966                                             false);
967                 if (IS_ERR(req)) {
968                         ret = PTR_ERR(req);
969                         break;
970                 }
971
972                 len = iter_get_bvecs_alloc(iter, size, &bvecs, &num_pages);
973                 if (len < 0) {
974                         ceph_osdc_put_request(req);
975                         ret = len;
976                         break;
977                 }
978                 if (len != size)
979                         osd_req_op_extent_update(req, 0, len);
980
981                 /*
982                  * To simplify error handling, allow AIO when IO within i_size
983                  * or IO can be satisfied by single OSD request.
984                  */
985                 if (pos == iocb->ki_pos && !is_sync_kiocb(iocb) &&
986                     (len == count || pos + count <= i_size_read(inode))) {
987                         aio_req = kzalloc(sizeof(*aio_req), GFP_KERNEL);
988                         if (aio_req) {
989                                 aio_req->iocb = iocb;
990                                 aio_req->write = write;
991                                 aio_req->should_dirty = should_dirty;
992                                 INIT_LIST_HEAD(&aio_req->osd_reqs);
993                                 if (write) {
994                                         aio_req->mtime = mtime;
995                                         swap(aio_req->prealloc_cf, *pcf);
996                                 }
997                         }
998                         /* ignore error */
999                 }
1000
1001                 if (write) {
1002                         /*
1003                          * throw out any page cache pages in this range. this
1004                          * may block.
1005                          */
1006                         truncate_inode_pages_range(inode->i_mapping, pos,
1007                                         (pos+len) | (PAGE_SIZE - 1));
1008
1009                         req->r_mtime = mtime;
1010                 }
1011
1012                 osd_req_op_extent_osd_data_bvecs(req, 0, bvecs, num_pages, len);
1013
1014                 if (aio_req) {
1015                         aio_req->total_len += len;
1016                         aio_req->num_reqs++;
1017                         atomic_inc(&aio_req->pending_reqs);
1018
1019                         req->r_callback = ceph_aio_complete_req;
1020                         req->r_inode = inode;
1021                         req->r_priv = aio_req;
1022                         list_add_tail(&req->r_unsafe_item, &aio_req->osd_reqs);
1023
1024                         pos += len;
1025                         continue;
1026                 }
1027
1028                 ret = ceph_osdc_start_request(req->r_osdc, req, false);
1029                 if (!ret)
1030                         ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
1031
1032                 size = i_size_read(inode);
1033                 if (!write) {
1034                         if (ret == -ENOENT)
1035                                 ret = 0;
1036                         if (ret >= 0 && ret < len && pos + ret < size) {
1037                                 struct iov_iter i;
1038                                 int zlen = min_t(size_t, len - ret,
1039                                                  size - pos - ret);
1040
1041                                 iov_iter_bvec(&i, READ, bvecs, num_pages, len);
1042                                 iov_iter_advance(&i, ret);
1043                                 iov_iter_zero(zlen, &i);
1044                                 ret += zlen;
1045                         }
1046                         if (ret >= 0)
1047                                 len = ret;
1048                 }
1049
1050                 put_bvecs(bvecs, num_pages, should_dirty);
1051                 ceph_osdc_put_request(req);
1052                 if (ret < 0)
1053                         break;
1054
1055                 pos += len;
1056                 if (!write && pos >= size)
1057                         break;
1058
1059                 if (write && pos > size) {
1060                         if (ceph_inode_set_size(inode, pos))
1061                                 ceph_check_caps(ceph_inode(inode),
1062                                                 CHECK_CAPS_AUTHONLY,
1063                                                 NULL);
1064                 }
1065         }
1066
1067         if (aio_req) {
1068                 LIST_HEAD(osd_reqs);
1069
1070                 if (aio_req->num_reqs == 0) {
1071                         kfree(aio_req);
1072                         return ret;
1073                 }
1074
1075                 ceph_get_cap_refs(ci, write ? CEPH_CAP_FILE_WR :
1076                                               CEPH_CAP_FILE_RD);
1077
1078                 list_splice(&aio_req->osd_reqs, &osd_reqs);
1079                 while (!list_empty(&osd_reqs)) {
1080                         req = list_first_entry(&osd_reqs,
1081                                                struct ceph_osd_request,
1082                                                r_unsafe_item);
1083                         list_del_init(&req->r_unsafe_item);
1084                         if (ret >= 0)
1085                                 ret = ceph_osdc_start_request(req->r_osdc,
1086                                                               req, false);
1087                         if (ret < 0) {
1088                                 req->r_result = ret;
1089                                 ceph_aio_complete_req(req);
1090                         }
1091                 }
1092                 return -EIOCBQUEUED;
1093         }
1094
1095         if (ret != -EOLDSNAPC && pos > iocb->ki_pos) {
1096                 ret = pos - iocb->ki_pos;
1097                 iocb->ki_pos = pos;
1098         }
1099         return ret;
1100 }
1101
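/*
 * Sizing example for the request loop in ceph_direct_read_write() above
 * (illustrative numbers): with wsize = 8 MiB, a 20 MiB O_DIRECT write is
 * consumed in chunks of at most 8 MiB; ceph_osdc_new_request() may shrink
 * each chunk further so it does not cross a RADOS object boundary, and
 * osd_req_op_extent_update() trims the op when fewer bytes were pinned.
 */
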
1102 /*
1103  * Synchronous write, straight from __user pointer or user pages.
1104  *
1105  * If write spans object boundary, just do multiple writes.  (For a
1106  * correct atomic write, we should e.g. take write locks on all
1107  * objects, rollback on failure, etc.)
1108  */
1109 static ssize_t
1110 ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
1111                 struct ceph_snap_context *snapc)
1112 {
1113         struct file *file = iocb->ki_filp;
1114         struct inode *inode = file_inode(file);
1115         struct ceph_inode_info *ci = ceph_inode(inode);
1116         struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1117         struct ceph_vino vino;
1118         struct ceph_osd_request *req;
1119         struct page **pages;
1120         u64 len;
1121         int num_pages;
1122         int written = 0;
1123         int flags;
1124         int ret;
1125         bool check_caps = false;
1126         struct timespec64 mtime = current_time(inode);
1127         size_t count = iov_iter_count(from);
1128
1129         if (ceph_snap(file_inode(file)) != CEPH_NOSNAP)
1130                 return -EROFS;
1131
1132         dout("sync_write on file %p %lld~%u snapc %p seq %lld\n",
1133              file, pos, (unsigned)count, snapc, snapc->seq);
1134
1135         ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + count);
1136         if (ret < 0)
1137                 return ret;
1138
1139         ret = invalidate_inode_pages2_range(inode->i_mapping,
1140                                             pos >> PAGE_SHIFT,
1141                                             (pos + count) >> PAGE_SHIFT);
1142         if (ret < 0)
1143                 dout("invalidate_inode_pages2_range returned %d\n", ret);
1144
1145         flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
1146
1147         while ((len = iov_iter_count(from)) > 0) {
1148                 size_t left;
1149                 int n;
1150
1151                 vino = ceph_vino(inode);
1152                 req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
1153                                             vino, pos, &len, 0, 1,
1154                                             CEPH_OSD_OP_WRITE, flags, snapc,
1155                                             ci->i_truncate_seq,
1156                                             ci->i_truncate_size,
1157                                             false);
1158                 if (IS_ERR(req)) {
1159                         ret = PTR_ERR(req);
1160                         break;
1161                 }
1162
1163                 /*
1164                  * write from beginning of first page,
1165                  * regardless of io alignment
1166                  */
1167                 num_pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1168
1169                 pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
1170                 if (IS_ERR(pages)) {
1171                         ret = PTR_ERR(pages);
1172                         goto out;
1173                 }
1174
1175                 left = len;
1176                 for (n = 0; n < num_pages; n++) {
1177                         size_t plen = min_t(size_t, left, PAGE_SIZE);
1178                         ret = copy_page_from_iter(pages[n], 0, plen, from);
1179                         if (ret != plen) {
1180                                 ret = -EFAULT;
1181                                 break;
1182                         }
1183                         left -= ret;
1184                 }
1185
1186                 if (ret < 0) {
1187                         ceph_release_page_vector(pages, num_pages);
1188                         goto out;
1189                 }
1190
1191                 req->r_inode = inode;
1192
1193                 osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0,
1194                                                 false, true);
1195
1196                 req->r_mtime = mtime;
1197                 ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
1198                 if (!ret)
1199                         ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
1200
1201 out:
1202                 ceph_osdc_put_request(req);
1203                 if (ret != 0) {
1204                         ceph_set_error_write(ci);
1205                         break;
1206                 }
1207
1208                 ceph_clear_error_write(ci);
1209                 pos += len;
1210                 written += len;
1211                 if (pos > i_size_read(inode)) {
1212                         check_caps = ceph_inode_set_size(inode, pos);
1213                         if (check_caps)
1214                                 ceph_check_caps(ceph_inode(inode),
1215                                                 CHECK_CAPS_AUTHONLY,
1216                                                 NULL);
1217                 }
1218
1219         }
1220
1221         if (ret != -EOLDSNAPC && written > 0) {
1222                 ret = written;
1223                 iocb->ki_pos = pos;
1224         }
1225         return ret;
1226 }
1227
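/*
 * Page-count example for ceph_sync_write() above (illustrative, assuming
 * PAGE_SIZE = 4096): a 10000-byte chunk needs
 * num_pages = (10000 + 4095) >> 12 = 3 pages, and the data is copied into
 * those pages starting at offset 0 regardless of the file position's page
 * alignment.
 */
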
1228 /*
1229  * Wrap generic_file_aio_read with checks for cap bits on the inode.
1230  * Atomically grab references, so that those bits are not released
1231  * back to the MDS mid-read.
1232  *
1233  * Hmm, the sync read case isn't actually async... should it be?
1234  */
1235 static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to)
1236 {
1237         struct file *filp = iocb->ki_filp;
1238         struct ceph_file_info *fi = filp->private_data;
1239         size_t len = iov_iter_count(to);
1240         struct inode *inode = file_inode(filp);
1241         struct ceph_inode_info *ci = ceph_inode(inode);
1242         struct page *pinned_page = NULL;
1243         ssize_t ret;
1244         int want, got = 0;
1245         int retry_op = 0, read = 0;
1246
1247 again:
1248         dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
1249              inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode);
1250
1251         if (fi->fmode & CEPH_FILE_MODE_LAZY)
1252                 want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
1253         else
1254                 want = CEPH_CAP_FILE_CACHE;
1255         ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, want, -1, &got, &pinned_page);
1256         if (ret < 0)
1257                 return ret;
1258
1259         if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 ||
1260             (iocb->ki_flags & IOCB_DIRECT) ||
1261             (fi->flags & CEPH_F_SYNC)) {
1262
1263                 dout("aio_sync_read %p %llx.%llx %llu~%u got cap refs on %s\n",
1264                      inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
1265                      ceph_cap_string(got));
1266
1267                 if (ci->i_inline_version == CEPH_INLINE_NONE) {
1268                         if (!retry_op && (iocb->ki_flags & IOCB_DIRECT)) {
1269                                 ret = ceph_direct_read_write(iocb, to,
1270                                                              NULL, NULL);
1271                                 if (ret >= 0 && ret < len)
1272                                         retry_op = CHECK_EOF;
1273                         } else {
1274                                 ret = ceph_sync_read(iocb, to, &retry_op);
1275                         }
1276                 } else {
1277                         retry_op = READ_INLINE;
1278                 }
1279         } else {
1280                 CEPH_DEFINE_RW_CONTEXT(rw_ctx, got);
1281                 dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
1282                      inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
1283                      ceph_cap_string(got));
1284                 ceph_add_rw_context(fi, &rw_ctx);
1285                 ret = generic_file_read_iter(iocb, to);
1286                 ceph_del_rw_context(fi, &rw_ctx);
1287         }
1288         dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
1289              inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
1290         if (pinned_page) {
1291                 put_page(pinned_page);
1292                 pinned_page = NULL;
1293         }
1294         ceph_put_cap_refs(ci, got);
1295         if (retry_op > HAVE_RETRIED && ret >= 0) {
1296                 int statret;
1297                 struct page *page = NULL;
1298                 loff_t i_size;
1299                 if (retry_op == READ_INLINE) {
1300                         page = __page_cache_alloc(GFP_KERNEL);
1301                         if (!page)
1302                                 return -ENOMEM;
1303                 }
1304
1305                 statret = __ceph_do_getattr(inode, page,
1306                                             CEPH_STAT_CAP_INLINE_DATA, !!page);
1307                 if (statret < 0) {
1308                         if (page)
1309                                 __free_page(page);
1310                         if (statret == -ENODATA) {
1311                                 BUG_ON(retry_op != READ_INLINE);
1312                                 goto again;
1313                         }
1314                         return statret;
1315                 }
1316
1317                 i_size = i_size_read(inode);
1318                 if (retry_op == READ_INLINE) {
1319                         BUG_ON(ret > 0 || read > 0);
1320                         if (iocb->ki_pos < i_size &&
1321                             iocb->ki_pos < PAGE_SIZE) {
1322                                 loff_t end = min_t(loff_t, i_size,
1323                                                    iocb->ki_pos + len);
1324                                 end = min_t(loff_t, end, PAGE_SIZE);
1325                                 if (statret < end)
1326                                         zero_user_segment(page, statret, end);
1327                                 ret = copy_page_to_iter(page,
1328                                                 iocb->ki_pos & ~PAGE_MASK,
1329                                                 end - iocb->ki_pos, to);
1330                                 iocb->ki_pos += ret;
1331                                 read += ret;
1332                         }
1333                         if (iocb->ki_pos < i_size && read < len) {
1334                                 size_t zlen = min_t(size_t, len - read,
1335                                                     i_size - iocb->ki_pos);
1336                                 ret = iov_iter_zero(zlen, to);
1337                                 iocb->ki_pos += ret;
1338                                 read += ret;
1339                         }
1340                         __free_pages(page, 0);
1341                         return read;
1342                 }
1343
1344                 /* hit EOF or hole? */
1345                 if (retry_op == CHECK_EOF && iocb->ki_pos < i_size &&
1346                     ret < len) {
1347                         dout("sync_read hit hole, ppos %lld < size %lld"
1348                              ", reading more\n", iocb->ki_pos, i_size);
1349
1350                         read += ret;
1351                         len -= ret;
1352                         retry_op = HAVE_RETRIED;
1353                         goto again;
1354                 }
1355         }
1356
1357         if (ret >= 0)
1358                 ret += read;
1359
1360         return ret;
1361 }
1362
1363 /*
1364  * Take cap references to avoid releasing caps to MDS mid-write.
1365  *
1366  * If we are synchronous, and write with an old snap context, the OSD
1367  * may return EOLDSNAPC.  In that case, retry the write.. _after_
1368  * dropping our cap refs and allowing the pending snap to logically
1369  * complete _before_ this write occurs.
1370  *
1371  * If we are near ENOSPC, write synchronously.
1372  */
1373 static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
1374 {
1375         struct file *file = iocb->ki_filp;
1376         struct ceph_file_info *fi = file->private_data;
1377         struct inode *inode = file_inode(file);
1378         struct ceph_inode_info *ci = ceph_inode(inode);
1379         struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1380         struct ceph_cap_flush *prealloc_cf;
1381         ssize_t count, written = 0;
1382         int err, want, got;
1383         loff_t pos;
1384         loff_t limit = max(i_size_read(inode), fsc->max_file_size);
1385
1386         if (ceph_snap(inode) != CEPH_NOSNAP)
1387                 return -EROFS;
1388
1389         prealloc_cf = ceph_alloc_cap_flush();
1390         if (!prealloc_cf)
1391                 return -ENOMEM;
1392
1393 retry_snap:
1394         inode_lock(inode);
1395
1396         /* We can write back this queue in page reclaim */
1397         current->backing_dev_info = inode_to_bdi(inode);
1398
1399         if (iocb->ki_flags & IOCB_APPEND) {
1400                 err = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
1401                 if (err < 0)
1402                         goto out;
1403         }
1404
1405         err = generic_write_checks(iocb, from);
1406         if (err <= 0)
1407                 goto out;
1408
1409         pos = iocb->ki_pos;
1410         if (unlikely(pos >= limit)) {
1411                 err = -EFBIG;
1412                 goto out;
1413         } else {
1414                 iov_iter_truncate(from, limit - pos);
1415         }
1416
1417         count = iov_iter_count(from);
1418         if (ceph_quota_is_max_bytes_exceeded(inode, pos + count)) {
1419                 err = -EDQUOT;
1420                 goto out;
1421         }
1422
1423         err = file_remove_privs(file);
1424         if (err)
1425                 goto out;
1426
1427         err = file_update_time(file);
1428         if (err)
1429                 goto out;
1430
1431         if (ci->i_inline_version != CEPH_INLINE_NONE) {
1432                 err = ceph_uninline_data(file, NULL);
1433                 if (err < 0)
1434                         goto out;
1435         }
1436
1437         /* FIXME: not complete since it doesn't account for being at quota */
1438         if (ceph_osdmap_flag(&fsc->client->osdc, CEPH_OSDMAP_FULL)) {
1439                 err = -ENOSPC;
1440                 goto out;
1441         }
1442
1443         dout("aio_write %p %llx.%llx %llu~%zd getting caps. i_size %llu\n",
1444              inode, ceph_vinop(inode), pos, count, i_size_read(inode));
1445         if (fi->fmode & CEPH_FILE_MODE_LAZY)
1446                 want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
1447         else
1448                 want = CEPH_CAP_FILE_BUFFER;
1449         got = 0;
1450         err = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, pos + count,
1451                             &got, NULL);
1452         if (err < 0)
1453                 goto out;
1454
1455         dout("aio_write %p %llx.%llx %llu~%zd got cap refs on %s\n",
1456              inode, ceph_vinop(inode), pos, count, ceph_cap_string(got));
1457
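	/*
	 * Use the sync or direct write path if we were not granted buffer
	 * caps (Fb/LAZYIO), if O_DIRECT or the CEPH_F_SYNC file flag is
	 * set, or after a previous write error; otherwise fall through to
	 * buffered writeback below.
	 */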
1458         if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 ||
1459             (iocb->ki_flags & IOCB_DIRECT) || (fi->flags & CEPH_F_SYNC) ||
1460             (ci->i_ceph_flags & CEPH_I_ERROR_WRITE)) {
1461                 struct ceph_snap_context *snapc;
1462                 struct iov_iter data;
1463                 inode_unlock(inode);
1464
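		/*
		 * Pick the snap context this write belongs to: the most
		 * recent pending cap snap if one exists, otherwise the
		 * head snap context.
		 */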
1465                 spin_lock(&ci->i_ceph_lock);
1466                 if (__ceph_have_pending_cap_snap(ci)) {
1467                         struct ceph_cap_snap *capsnap =
1468                                         list_last_entry(&ci->i_cap_snaps,
1469                                                         struct ceph_cap_snap,
1470                                                         ci_item);
1471                         snapc = ceph_get_snap_context(capsnap->context);
1472                 } else {
1473                         BUG_ON(!ci->i_head_snapc);
1474                         snapc = ceph_get_snap_context(ci->i_head_snapc);
1475                 }
1476                 spin_unlock(&ci->i_ceph_lock);
1477
1478                 /* we might need to revert to that point */
1479                 data = *from;
1480                 if (iocb->ki_flags & IOCB_DIRECT)
1481                         written = ceph_direct_read_write(iocb, &data, snapc,
1482                                                          &prealloc_cf);
1483                 else
1484                         written = ceph_sync_write(iocb, &data, pos, snapc);
1485                 if (written > 0)
1486                         iov_iter_advance(from, written);
1487                 ceph_put_snap_context(snapc);
1488         } else {
1489                 /*
1490                  * No need to acquire the i_truncate_mutex: the MDS
1491                  * revokes Fwb caps before sending a truncate message
1492                  * to us, and we can't get the Fwb cap while there is
1493                  * a pending vmtruncate.  So write and vmtruncate
1494                  * cannot run at the same time.
1495                  */
1496                 written = generic_perform_write(file, from, pos);
1497                 if (likely(written >= 0))
1498                         iocb->ki_pos = pos + written;
1499                 inode_unlock(inode);
1500         }
1501
1502         if (written >= 0) {
1503                 int dirty;
1504
1505                 spin_lock(&ci->i_ceph_lock);
1506                 ci->i_inline_version = CEPH_INLINE_NONE;
1507                 dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
1508                                                &prealloc_cf);
1509                 spin_unlock(&ci->i_ceph_lock);
1510                 if (dirty)
1511                         __mark_inode_dirty(inode, dirty);
1512                 if (ceph_quota_is_max_bytes_approaching(inode, iocb->ki_pos))
1513                         ceph_check_caps(ci, CHECK_CAPS_NODELAY, NULL);
1514         }
1515
1516         dout("aio_write %p %llx.%llx %llu~%u  dropping cap refs on %s\n",
1517              inode, ceph_vinop(inode), pos, (unsigned)count,
1518              ceph_cap_string(got));
1519         ceph_put_cap_refs(ci, got);
1520
1521         if (written == -EOLDSNAPC) {
1522                 dout("aio_write %p %llx.%llx %llu~%u got EOLDSNAPC, retrying\n",
1523                      inode, ceph_vinop(inode), pos, (unsigned)count);
1524                 goto retry_snap;
1525         }
1526
1527         if (written >= 0) {
1528                 if (ceph_osdmap_flag(&fsc->client->osdc, CEPH_OSDMAP_NEARFULL))
1529                         iocb->ki_flags |= IOCB_DSYNC;
1530                 written = generic_write_sync(iocb, written);
1531         }
1532
1533         goto out_unlocked;
1534
1535 out:
1536         inode_unlock(inode);
1537 out_unlocked:
1538         ceph_free_cap_flush(prealloc_cf);
1539         current->backing_dev_info = NULL;
1540         return written ? written : err;
1541 }
1542
1543 /*
1544  * llseek.  be sure to verify file size on SEEK_END.
1545  */
1546 static loff_t ceph_llseek(struct file *file, loff_t offset, int whence)
1547 {
1548         struct inode *inode = file->f_mapping->host;
1549         struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1550         loff_t i_size;
1551         loff_t ret;
1552
1553         inode_lock(inode);
1554
1555         if (whence == SEEK_END || whence == SEEK_DATA || whence == SEEK_HOLE) {
1556                 ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
1557                 if (ret < 0)
1558                         goto out;
1559         }
1560
1561         i_size = i_size_read(inode);
1562         switch (whence) {
1563         case SEEK_END:
1564                 offset += i_size;
1565                 break;
1566         case SEEK_CUR:
1567                 /*
1568                  * Here we special-case the lseek(fd, 0, SEEK_CUR)
1569                  * position-querying operation.  Avoid rewriting the "same"
1570                  * f_pos value back to the file because a concurrent read(),
1571                  * write() or lseek() might have altered it
1572                  */
1573                 if (offset == 0) {
1574                         ret = file->f_pos;
1575                         goto out;
1576                 }
1577                 offset += file->f_pos;
1578                 break;
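	/*
	 * Holes are not tracked here: SEEK_DATA succeeds for any offset
	 * inside the file, and SEEK_HOLE reports the first hole at EOF.
	 */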
1579         case SEEK_DATA:
1580                 if (offset < 0 || offset >= i_size) {
1581                         ret = -ENXIO;
1582                         goto out;
1583                 }
1584                 break;
1585         case SEEK_HOLE:
1586                 if (offset < 0 || offset >= i_size) {
1587                         ret = -ENXIO;
1588                         goto out;
1589                 }
1590                 offset = i_size;
1591                 break;
1592         }
1593
1594         ret = vfs_setpos(file, offset, max(i_size, fsc->max_file_size));
1595
1596 out:
1597         inode_unlock(inode);
1598         return ret;
1599 }
1600
1601 static inline void ceph_zero_partial_page(
1602         struct inode *inode, loff_t offset, unsigned size)
1603 {
1604         struct page *page;
1605         pgoff_t index = offset >> PAGE_SHIFT;
1606
1607         page = find_lock_page(inode->i_mapping, index);
1608         if (page) {
1609                 wait_on_page_writeback(page);
1610                 zero_user(page, offset & (PAGE_SIZE - 1), size);
1611                 unlock_page(page);
1612                 put_page(page);
1613         }
1614 }
1615
1616 static void ceph_zero_pagecache_range(struct inode *inode, loff_t offset,
1617                                       loff_t length)
1618 {
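	/*
	 * Zero the page cache for the range in three steps: the partial
	 * page at the head, whole pages in the middle (simply dropped
	 * from the cache), and the partial page at the tail.
	 */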
1619         loff_t nearly = round_up(offset, PAGE_SIZE);
1620         if (offset < nearly) {
1621                 loff_t size = nearly - offset;
1622                 if (length < size)
1623                         size = length;
1624                 ceph_zero_partial_page(inode, offset, size);
1625                 offset += size;
1626                 length -= size;
1627         }
1628         if (length >= PAGE_SIZE) {
1629                 loff_t size = round_down(length, PAGE_SIZE);
1630                 truncate_pagecache_range(inode, offset, offset + size - 1);
1631                 offset += size;
1632                 length -= size;
1633         }
1634         if (length)
1635                 ceph_zero_partial_page(inode, offset, length);
1636 }
1637
1638 static int ceph_zero_partial_object(struct inode *inode,
1639                                     loff_t offset, loff_t *length)
1640 {
1641         struct ceph_inode_info *ci = ceph_inode(inode);
1642         struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1643         struct ceph_osd_request *req;
1644         int ret = 0;
1645         loff_t zero = 0;
1646         int op;
1647
1648         if (!length) {
1649                 op = offset ? CEPH_OSD_OP_DELETE : CEPH_OSD_OP_TRUNCATE;
1650                 length = &zero;
1651         } else {
1652                 op = CEPH_OSD_OP_ZERO;
1653         }
1654
1655         req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
1656                                         ceph_vino(inode),
1657                                         offset, length,
1658                                         0, 1, op,
1659                                         CEPH_OSD_FLAG_WRITE,
1660                                         NULL, 0, 0, false);
1661         if (IS_ERR(req)) {
1662                 ret = PTR_ERR(req);
1663                 goto out;
1664         }
1665
1666         req->r_mtime = inode->i_mtime;
1667         ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
1668         if (!ret) {
1669                 ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
1670                 if (ret == -ENOENT)
1671                         ret = 0;
1672         }
1673         ceph_osdc_put_request(req);
1674
1675 out:
1676         return ret;
1677 }
1678
1679 static int ceph_zero_objects(struct inode *inode, loff_t offset, loff_t length)
1680 {
1681         int ret = 0;
1682         struct ceph_inode_info *ci = ceph_inode(inode);
1683         s32 stripe_unit = ci->i_layout.stripe_unit;
1684         s32 stripe_count = ci->i_layout.stripe_count;
1685         s32 object_size = ci->i_layout.object_size;
1686         u64 object_set_size = (u64) object_size * stripe_count;
1687         u64 nearly, t;
1688
1689         /* round offset up to next period boundary */
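	/* e.g. 4M objects, stripe_count 2 => 8M period; offset 5M rounds up to 8M */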
1690         nearly = offset + object_set_size - 1;
1691         t = nearly;
1692         nearly -= do_div(t, object_set_size);
1693
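	/* first, zero partial objects up to that period boundary */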
1694         while (length && offset < nearly) {
1695                 loff_t size = length;
1696                 ret = ceph_zero_partial_object(inode, offset, &size);
1697                 if (ret < 0)
1698                         return ret;
1699                 offset += size;
1700                 length -= size;
1701         }
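	/*
	 * then punch whole object sets: each object in the set is deleted
	 * or truncated outright (NULL length), one call per stripe unit
	 */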
1702         while (length >= object_set_size) {
1703                 int i;
1704                 loff_t pos = offset;
1705                 for (i = 0; i < stripe_count; ++i) {
1706                         ret = ceph_zero_partial_object(inode, pos, NULL);
1707                         if (ret < 0)
1708                                 return ret;
1709                         pos += stripe_unit;
1710                 }
1711                 offset += object_set_size;
1712                 length -= object_set_size;
1713         }
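	/* finally, zero whatever partial tail remains */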
1714         while (length) {
1715                 loff_t size = length;
1716                 ret = ceph_zero_partial_object(inode, offset, &size);
1717                 if (ret < 0)
1718                         return ret;
1719                 offset += size;
1720                 length -= size;
1721         }
1722         return ret;
1723 }
1724
1725 static long ceph_fallocate(struct file *file, int mode,
1726                                 loff_t offset, loff_t length)
1727 {
1728         struct ceph_file_info *fi = file->private_data;
1729         struct inode *inode = file_inode(file);
1730         struct ceph_inode_info *ci = ceph_inode(inode);
1731         struct ceph_cap_flush *prealloc_cf;
1732         int want, got = 0;
1733         int dirty;
1734         int ret = 0;
1735         loff_t endoff = 0;
1736         loff_t size;
1737
1738         if (mode != (FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
1739                 return -EOPNOTSUPP;
1740
1741         if (!S_ISREG(inode->i_mode))
1742                 return -EOPNOTSUPP;
1743
1744         prealloc_cf = ceph_alloc_cap_flush();
1745         if (!prealloc_cf)
1746                 return -ENOMEM;
1747
1748         inode_lock(inode);
1749
1750         if (ceph_snap(inode) != CEPH_NOSNAP) {
1751                 ret = -EROFS;
1752                 goto unlock;
1753         }
1754
1755         if (ci->i_inline_version != CEPH_INLINE_NONE) {
1756                 ret = ceph_uninline_data(file, NULL);
1757                 if (ret < 0)
1758                         goto unlock;
1759         }
1760
1761         size = i_size_read(inode);
1762
1763         /* Are we punching a hole beyond EOF? */
1764         if (offset >= size)
1765                 goto unlock;
1766         if ((offset + length) > size)
1767                 length = size - offset;
1768
1769         if (fi->fmode & CEPH_FILE_MODE_LAZY)
1770                 want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
1771         else
1772                 want = CEPH_CAP_FILE_BUFFER;
1773
1774         ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, endoff, &got, NULL);
1775         if (ret < 0)
1776                 goto unlock;
1777
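	/* punch the hole: drop/zero the cached pages, then zero the OSD objects */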
1778         ceph_zero_pagecache_range(inode, offset, length);
1779         ret = ceph_zero_objects(inode, offset, length);
1780
1781         if (!ret) {
1782                 spin_lock(&ci->i_ceph_lock);
1783                 ci->i_inline_version = CEPH_INLINE_NONE;
1784                 dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
1785                                                &prealloc_cf);
1786                 spin_unlock(&ci->i_ceph_lock);
1787                 if (dirty)
1788                         __mark_inode_dirty(inode, dirty);
1789         }
1790
1791         ceph_put_cap_refs(ci, got);
1792 unlock:
1793         inode_unlock(inode);
1794         ceph_free_cap_flush(prealloc_cf);
1795         return ret;
1796 }
1797
1798 /*
1799  * This function tries to get FILE_WR capabilities for dst_ci and FILE_RD for
1800  * src_ci.  Two attempts are made to obtain both caps, and an error is return if
1801  * this fails; zero is returned on success.
1802  */
1803 static int get_rd_wr_caps(struct ceph_inode_info *src_ci,
1804                           loff_t src_endoff, int *src_got,
1805                           struct ceph_inode_info *dst_ci,
1806                           loff_t dst_endoff, int *dst_got)
1807 {
1808         int ret = 0;
1809         bool retrying = false;
1810
1811 retry_caps:
1812         ret = ceph_get_caps(dst_ci, CEPH_CAP_FILE_WR, CEPH_CAP_FILE_BUFFER,
1813                             dst_endoff, dst_got, NULL);
1814         if (ret < 0)
1815                 return ret;
1816
1817         /*
1818          * Since we're already holding the FILE_WR capability for the dst file,
1819          * we would risk a deadlock by using ceph_get_caps.  Thus, we'll do some
1820          * retry dance instead to try to get both capabilities.
1821          */
1822         ret = ceph_try_get_caps(src_ci, CEPH_CAP_FILE_RD, CEPH_CAP_FILE_SHARED,
1823                                 false, src_got);
1824         if (ret <= 0) {
1825                 /* Start by dropping dst_ci caps and getting src_ci caps */
1826                 ceph_put_cap_refs(dst_ci, *dst_got);
1827                 if (retrying) {
1828                         if (!ret)
1829                                 /* ceph_try_get_caps masks EAGAIN */
1830                                 ret = -EAGAIN;
1831                         return ret;
1832                 }
1833                 ret = ceph_get_caps(src_ci, CEPH_CAP_FILE_RD,
1834                                     CEPH_CAP_FILE_SHARED, src_endoff,
1835                                     src_got, NULL);
1836                 if (ret < 0)
1837                         return ret;
1838                 /*... drop src_ci caps too, and retry */
1839                 ceph_put_cap_refs(src_ci, *src_got);
1840                 retrying = true;
1841                 goto retry_caps;
1842         }
1843         return ret;
1844 }
1845
1846 static void put_rd_wr_caps(struct ceph_inode_info *src_ci, int src_got,
1847                            struct ceph_inode_info *dst_ci, int dst_got)
1848 {
1849         ceph_put_cap_refs(src_ci, src_got);
1850         ceph_put_cap_refs(dst_ci, dst_got);
1851 }
1852
1853 /*
1854  * This function does several size-related checks, returning an error if:
1855  *  - source file is smaller than off+len
1856  *  - destination file size is not OK (inode_newsize_ok())
1857  *  - max bytes quota is exceeded
1858  */
1859 static int is_file_size_ok(struct inode *src_inode, struct inode *dst_inode,
1860                            loff_t src_off, loff_t dst_off, size_t len)
1861 {
1862         loff_t size, endoff;
1863
1864         size = i_size_read(src_inode);
1865         /*
1866          * Don't copy beyond source file EOF.  Instead of simply setting length
1867          * to (size - src_off), just drop to VFS default implementation, as the
1868          * local i_size may be stale due to other clients writing to the source
1869          * inode.
1870          */
1871         if (src_off + len > size) {
1872                 dout("Copy beyond EOF (%llu + %zu > %llu)\n",
1873                      src_off, len, size);
1874                 return -EOPNOTSUPP;
1875         }
1876         size = i_size_read(dst_inode);
1877
1878         endoff = dst_off + len;
1879         if (inode_newsize_ok(dst_inode, endoff))
1880                 return -EOPNOTSUPP;
1881
1882         if (ceph_quota_is_max_bytes_exceeded(dst_inode, endoff))
1883                 return -EDQUOT;
1884
1885         return 0;
1886 }
1887
1888 static ssize_t ceph_copy_file_range(struct file *src_file, loff_t src_off,
1889                                     struct file *dst_file, loff_t dst_off,
1890                                     size_t len, unsigned int flags)
1891 {
1892         struct inode *src_inode = file_inode(src_file);
1893         struct inode *dst_inode = file_inode(dst_file);
1894         struct ceph_inode_info *src_ci = ceph_inode(src_inode);
1895         struct ceph_inode_info *dst_ci = ceph_inode(dst_inode);
1896         struct ceph_cap_flush *prealloc_cf;
1897         struct ceph_object_locator src_oloc, dst_oloc;
1898         struct ceph_object_id src_oid, dst_oid;
1899         loff_t endoff = 0, size;
1900         ssize_t ret = -EIO;
1901         u64 src_objnum, dst_objnum, src_objoff, dst_objoff;
1902         u32 src_objlen, dst_objlen, object_size;
1903         int src_got = 0, dst_got = 0, err, dirty;
1904         bool do_final_copy = false;
1905
1906         if (src_inode == dst_inode)
1907                 return -EINVAL;
1908         if (ceph_snap(dst_inode) != CEPH_NOSNAP)
1909                 return -EROFS;
1910
1911         /*
1912          * Some of the checks below will return -EOPNOTSUPP, which will force a
1913          * fallback to the default VFS copy_file_range implementation.  This is
1914  * desirable in several cases (for example, when 'len' is smaller than the
1915  * object size, or when the default implementation would simply be more
1916  * efficient).
1917          */
1918
1919         if (ceph_test_mount_opt(ceph_inode_to_client(src_inode), NOCOPYFROM))
1920                 return -EOPNOTSUPP;
1921
1922         if ((src_ci->i_layout.stripe_unit != dst_ci->i_layout.stripe_unit) ||
1923             (src_ci->i_layout.stripe_count != dst_ci->i_layout.stripe_count) ||
1924             (src_ci->i_layout.object_size != dst_ci->i_layout.object_size))
1925                 return -EOPNOTSUPP;
1926
1927         if (len < src_ci->i_layout.object_size)
1928                 return -EOPNOTSUPP; /* no remote copy will be done */
1929
1930         prealloc_cf = ceph_alloc_cap_flush();
1931         if (!prealloc_cf)
1932                 return -ENOMEM;
1933
1934         /* Start by sync'ing the source file */
1935         ret = file_write_and_wait_range(src_file, src_off, (src_off + len));
1936         if (ret < 0)
1937                 goto out;
1938
1939         /*
1940          * We need FILE_WR caps for dst_ci and FILE_RD for src_ci as other
1941          * clients may have dirty data in their caches.  And OSDs know nothing
1942          * about caps, so they can't safely do the remote object copies.
1943          */
1944         err = get_rd_wr_caps(src_ci, (src_off + len), &src_got,
1945                              dst_ci, (dst_off + len), &dst_got);
1946         if (err < 0) {
1947                 dout("get_rd_wr_caps returned %d\n", err);
1948                 ret = -EOPNOTSUPP;
1949                 goto out;
1950         }
1951
1952         ret = is_file_size_ok(src_inode, dst_inode, src_off, dst_off, len);
1953         if (ret < 0)
1954                 goto out_caps;
1955
1956         size = i_size_read(dst_inode);
1957         endoff = dst_off + len;
1958
1959         /* Drop dst file cached pages */
1960         ret = invalidate_inode_pages2_range(dst_inode->i_mapping,
1961                                             dst_off >> PAGE_SHIFT,
1962                                             endoff >> PAGE_SHIFT);
1963         if (ret < 0) {
1964                 dout("Failed to invalidate inode pages (%zd)\n", ret);
1965                 ret = 0; /* XXX */
1966         }
1967         src_oloc.pool = src_ci->i_layout.pool_id;
1968         src_oloc.pool_ns = ceph_try_get_string(src_ci->i_layout.pool_ns);
1969         dst_oloc.pool = dst_ci->i_layout.pool_id;
1970         dst_oloc.pool_ns = ceph_try_get_string(dst_ci->i_layout.pool_ns);
1971
1972         ceph_calc_file_object_mapping(&src_ci->i_layout, src_off,
1973                                       src_ci->i_layout.object_size,
1974                                       &src_objnum, &src_objoff, &src_objlen);
1975         ceph_calc_file_object_mapping(&dst_ci->i_layout, dst_off,
1976                                       dst_ci->i_layout.object_size,
1977                                       &dst_objnum, &dst_objoff, &dst_objlen);
1978         /* object-level offsets need to be the same */
1979         if (src_objoff != dst_objoff) {
1980                 ret = -EOPNOTSUPP;
1981                 goto out_caps;
1982         }
1983
1984         /*
1985          * Do a manual copy if the object offset isn't object aligned.
1986          * 'src_objlen' contains the bytes left until the end of the object,
1987          * starting at src_off.
1988          */
1989         if (src_objoff) {
1990                 /*
1991                  * we need to temporarily drop all caps as we'll be calling
1992                  * {read,write}_iter, which will get caps again.
1993                  */
1994                 put_rd_wr_caps(src_ci, src_got, dst_ci, dst_got);
1995                 ret = do_splice_direct(src_file, &src_off, dst_file,
1996                                        &dst_off, src_objlen, flags);
1997                 if (ret < 0) {
1998                         dout("do_splice_direct returned %zd\n", ret);
1999                         goto out;
2000                 }
2001                 len -= ret;
2002                 err = get_rd_wr_caps(src_ci, (src_off + len),
2003                                      &src_got, dst_ci,
2004                                      (dst_off + len), &dst_got);
2005                 if (err < 0)
2006                         goto out;
2007                 err = is_file_size_ok(src_inode, dst_inode,
2008                                       src_off, dst_off, len);
2009                 if (err < 0)
2010                         goto out_caps;
2011         }
2012         object_size = src_ci->i_layout.object_size;
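	/*
	 * Copy full objects remotely: compute the object name on both
	 * sides and ask the OSDs to copy each source object into the
	 * matching destination object.
	 */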
2013         while (len >= object_size) {
2014                 ceph_calc_file_object_mapping(&src_ci->i_layout, src_off,
2015                                               object_size, &src_objnum,
2016                                               &src_objoff, &src_objlen);
2017                 ceph_calc_file_object_mapping(&dst_ci->i_layout, dst_off,
2018                                               object_size, &dst_objnum,
2019                                               &dst_objoff, &dst_objlen);
2020                 ceph_oid_init(&src_oid);
2021                 ceph_oid_printf(&src_oid, "%llx.%08llx",
2022                                 src_ci->i_vino.ino, src_objnum);
2023                 ceph_oid_init(&dst_oid);
2024                 ceph_oid_printf(&dst_oid, "%llx.%08llx",
2025                                 dst_ci->i_vino.ino, dst_objnum);
2026                 /* Do an object remote copy */
2027                 err = ceph_osdc_copy_from(
2028                         &ceph_inode_to_client(src_inode)->client->osdc,
2029                         src_ci->i_vino.snap, 0,
2030                         &src_oid, &src_oloc,
2031                         CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL |
2032                         CEPH_OSD_OP_FLAG_FADVISE_NOCACHE,
2033                         &dst_oid, &dst_oloc,
2034                         CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL |
2035                         CEPH_OSD_OP_FLAG_FADVISE_DONTNEED, 0);
2036                 if (err) {
2037                         dout("ceph_osdc_copy_from returned %d\n", err);
2038                         if (!ret)
2039                                 ret = err;
2040                         goto out_caps;
2041                 }
2042                 len -= object_size;
2043                 src_off += object_size;
2044                 dst_off += object_size;
2045                 ret += object_size;
2046         }
2047
2048         if (len)
2049                 /* We still need one final local copy */
2050                 do_final_copy = true;
2051
2052         file_update_time(dst_file);
2053         if (endoff > size) {
2054                 int caps_flags = 0;
2055
2056                 /* Let the MDS know about dst file size change */
2057                 if (ceph_quota_is_max_bytes_approaching(dst_inode, endoff))
2058                         caps_flags |= CHECK_CAPS_NODELAY;
2059                 if (ceph_inode_set_size(dst_inode, endoff))
2060                         caps_flags |= CHECK_CAPS_AUTHONLY;
2061                 if (caps_flags)
2062                         ceph_check_caps(dst_ci, caps_flags, NULL);
2063         }
2064         /* Mark Fw dirty */
2065         spin_lock(&dst_ci->i_ceph_lock);
2066         dst_ci->i_inline_version = CEPH_INLINE_NONE;
2067         dirty = __ceph_mark_dirty_caps(dst_ci, CEPH_CAP_FILE_WR, &prealloc_cf);
2068         spin_unlock(&dst_ci->i_ceph_lock);
2069         if (dirty)
2070                 __mark_inode_dirty(dst_inode, dirty);
2071
2072 out_caps:
2073         put_rd_wr_caps(src_ci, src_got, dst_ci, dst_got);
2074
2075         if (do_final_copy) {
2076                 err = do_splice_direct(src_file, &src_off, dst_file,
2077                                        &dst_off, len, flags);
2078                 if (err < 0) {
2079                         dout("do_splice_direct returned %d\n", err);
2080                         goto out;
2081                 }
2082                 len -= err;
2083                 ret += err;
2084         }
2085
2086 out:
2087         ceph_free_cap_flush(prealloc_cf);
2088
2089         return ret;
2090 }
2091
2092 const struct file_operations ceph_file_fops = {
2093         .open = ceph_open,
2094         .release = ceph_release,
2095         .llseek = ceph_llseek,
2096         .read_iter = ceph_read_iter,
2097         .write_iter = ceph_write_iter,
2098         .mmap = ceph_mmap,
2099         .fsync = ceph_fsync,
2100         .lock = ceph_lock,
2101         .flock = ceph_flock,
2102         .splice_read = generic_file_splice_read,
2103         .splice_write = iter_file_splice_write,
2104         .unlocked_ioctl = ceph_ioctl,
2105         .compat_ioctl   = ceph_ioctl,
2106         .fallocate      = ceph_fallocate,
2107         .copy_file_range = ceph_copy_file_range,
2108 };