xfs: drop dmapi hooks
[sfrench/cifs-2.6.git] fs/xfs/xfs_bmap.c
1 /*
2  * Copyright (c) 2000-2006 Silicon Graphics, Inc.
3  * All Rights Reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it would be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write the Free Software Foundation,
16  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
17  */
18 #include "xfs.h"
19 #include "xfs_fs.h"
20 #include "xfs_types.h"
21 #include "xfs_bit.h"
22 #include "xfs_log.h"
23 #include "xfs_inum.h"
24 #include "xfs_trans.h"
25 #include "xfs_sb.h"
26 #include "xfs_ag.h"
27 #include "xfs_dir2.h"
28 #include "xfs_da_btree.h"
29 #include "xfs_bmap_btree.h"
30 #include "xfs_alloc_btree.h"
31 #include "xfs_ialloc_btree.h"
32 #include "xfs_dir2_sf.h"
33 #include "xfs_attr_sf.h"
34 #include "xfs_dinode.h"
35 #include "xfs_inode.h"
36 #include "xfs_btree.h"
37 #include "xfs_mount.h"
38 #include "xfs_ialloc.h"
39 #include "xfs_itable.h"
40 #include "xfs_dir2_data.h"
41 #include "xfs_dir2_leaf.h"
42 #include "xfs_dir2_block.h"
43 #include "xfs_inode_item.h"
44 #include "xfs_extfree_item.h"
45 #include "xfs_alloc.h"
46 #include "xfs_bmap.h"
47 #include "xfs_rtalloc.h"
48 #include "xfs_error.h"
49 #include "xfs_attr_leaf.h"
50 #include "xfs_rw.h"
51 #include "xfs_quota.h"
52 #include "xfs_trans_space.h"
53 #include "xfs_buf_item.h"
54 #include "xfs_filestream.h"
55 #include "xfs_vnodeops.h"
56 #include "xfs_trace.h"
57
58
59 #ifdef DEBUG
60 STATIC void
61 xfs_bmap_check_leaf_extents(xfs_btree_cur_t *cur, xfs_inode_t *ip, int whichfork);
62 #endif
63
64 kmem_zone_t             *xfs_bmap_free_item_zone;
65
66 /*
67  * Prototypes for internal bmap routines.
68  */
69
70
71 /*
72  * Called from xfs_bmap_add_attrfork to handle extents format files.
73  */
74 STATIC int                                      /* error */
75 xfs_bmap_add_attrfork_extents(
76         xfs_trans_t             *tp,            /* transaction pointer */
77         xfs_inode_t             *ip,            /* incore inode pointer */
78         xfs_fsblock_t           *firstblock,    /* first block allocated */
79         xfs_bmap_free_t         *flist,         /* blocks to free at commit */
80         int                     *flags);        /* inode logging flags */
81
82 /*
83  * Called from xfs_bmap_add_attrfork to handle local format files.
84  */
85 STATIC int                                      /* error */
86 xfs_bmap_add_attrfork_local(
87         xfs_trans_t             *tp,            /* transaction pointer */
88         xfs_inode_t             *ip,            /* incore inode pointer */
89         xfs_fsblock_t           *firstblock,    /* first block allocated */
90         xfs_bmap_free_t         *flist,         /* blocks to free at commit */
91         int                     *flags);        /* inode logging flags */
92
93 /*
94  * Called by xfs_bmapi to update file extent records and the btree
95  * after allocating space (or doing a delayed allocation).
96  */
97 STATIC int                              /* error */
98 xfs_bmap_add_extent(
99         xfs_inode_t             *ip,    /* incore inode pointer */
100         xfs_extnum_t            idx,    /* extent number to update/insert */
101         xfs_btree_cur_t         **curp, /* if *curp is null, not a btree */
102         xfs_bmbt_irec_t         *new,   /* new data to add to file extents */
103         xfs_fsblock_t           *first, /* pointer to firstblock variable */
104         xfs_bmap_free_t         *flist, /* list of extents to be freed */
105         int                     *logflagsp, /* inode logging flags */
106         xfs_extdelta_t          *delta, /* Change made to incore extents */
107         int                     whichfork, /* data or attr fork */
108         int                     rsvd);  /* OK to allocate reserved blocks */
109
110 /*
111  * Called by xfs_bmap_add_extent to handle cases converting a delayed
112  * allocation to a real allocation.
113  */
114 STATIC int                              /* error */
115 xfs_bmap_add_extent_delay_real(
116         xfs_inode_t             *ip,    /* incore inode pointer */
117         xfs_extnum_t            idx,    /* extent number to update/insert */
118         xfs_btree_cur_t         **curp, /* if *curp is null, not a btree */
119         xfs_bmbt_irec_t         *new,   /* new data to add to file extents */
120         xfs_filblks_t           *dnew,  /* new delayed-alloc indirect blocks */
121         xfs_fsblock_t           *first, /* pointer to firstblock variable */
122         xfs_bmap_free_t         *flist, /* list of extents to be freed */
123         int                     *logflagsp, /* inode logging flags */
124         xfs_extdelta_t          *delta, /* Change made to incore extents */
125         int                     rsvd);  /* OK to allocate reserved blocks */
126
127 /*
128  * Called by xfs_bmap_add_extent to handle cases converting a hole
129  * to a delayed allocation.
130  */
131 STATIC int                              /* error */
132 xfs_bmap_add_extent_hole_delay(
133         xfs_inode_t             *ip,    /* incore inode pointer */
134         xfs_extnum_t            idx,    /* extent number to update/insert */
135         xfs_bmbt_irec_t         *new,   /* new data to add to file extents */
136         int                     *logflagsp,/* inode logging flags */
137         xfs_extdelta_t          *delta, /* Change made to incore extents */
138         int                     rsvd);  /* OK to allocate reserved blocks */
139
140 /*
141  * Called by xfs_bmap_add_extent to handle cases converting a hole
142  * to a real allocation.
143  */
144 STATIC int                              /* error */
145 xfs_bmap_add_extent_hole_real(
146         xfs_inode_t             *ip,    /* incore inode pointer */
147         xfs_extnum_t            idx,    /* extent number to update/insert */
148         xfs_btree_cur_t         *cur,   /* if null, not a btree */
149         xfs_bmbt_irec_t         *new,   /* new data to add to file extents */
150         int                     *logflagsp, /* inode logging flags */
151         xfs_extdelta_t          *delta, /* Change made to incore extents */
152         int                     whichfork); /* data or attr fork */
153
154 /*
155  * Called by xfs_bmap_add_extent to handle cases converting an unwritten
156  * allocation to a real allocation or vice versa.
157  */
158 STATIC int                              /* error */
159 xfs_bmap_add_extent_unwritten_real(
160         xfs_inode_t             *ip,    /* incore inode pointer */
161         xfs_extnum_t            idx,    /* extent number to update/insert */
162         xfs_btree_cur_t         **curp, /* if *curp is null, not a btree */
163         xfs_bmbt_irec_t         *new,   /* new data to add to file extents */
164         int                     *logflagsp, /* inode logging flags */
165         xfs_extdelta_t          *delta); /* Change made to incore extents */
166
167 /*
168  * xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file.
169  * It figures out where to ask the underlying allocator to put the new extent.
170  */
171 STATIC int                              /* error */
172 xfs_bmap_alloc(
173         xfs_bmalloca_t          *ap);   /* bmap alloc argument struct */
174
175 /*
176  * Transform a btree format file with only one leaf node, where the
177  * extents list will fit in the inode, into an extents format file.
178  * Since the file extents are already in-core, all we have to do is
179  * give up the space for the btree root and pitch the leaf block.
180  */
181 STATIC int                              /* error */
182 xfs_bmap_btree_to_extents(
183         xfs_trans_t             *tp,    /* transaction pointer */
184         xfs_inode_t             *ip,    /* incore inode pointer */
185         xfs_btree_cur_t         *cur,   /* btree cursor */
186         int                     *logflagsp, /* inode logging flags */
187         int                     whichfork); /* data or attr fork */
188
189 /*
190  * Called by xfs_bmapi to update file extent records and the btree
191  * after removing space (or undoing a delayed allocation).
192  */
193 STATIC int                              /* error */
194 xfs_bmap_del_extent(
195         xfs_inode_t             *ip,    /* incore inode pointer */
196         xfs_trans_t             *tp,    /* current trans pointer */
197         xfs_extnum_t            idx,    /* extent number to update/insert */
198         xfs_bmap_free_t         *flist, /* list of extents to be freed */
199         xfs_btree_cur_t         *cur,   /* if null, not a btree */
200         xfs_bmbt_irec_t         *new,   /* new data to add to file extents */
201         int                     *logflagsp,/* inode logging flags */
202         xfs_extdelta_t          *delta, /* Change made to incore extents */
203         int                     whichfork, /* data or attr fork */
204         int                     rsvd);   /* OK to allocate reserved blocks */
205
206 /*
207  * Remove the entry "free" from the free item list.  Prev points to the
208  * previous entry, unless "free" is the head of the list.
209  */
210 STATIC void
211 xfs_bmap_del_free(
212         xfs_bmap_free_t         *flist, /* free item list header */
213         xfs_bmap_free_item_t    *prev,  /* previous item on list, if any */
214         xfs_bmap_free_item_t    *free); /* list item to be freed */
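
/*
 * For illustration only (the real xfs_bmap_del_free() body lives further down
 * in this file): unlinking "free" from the singly linked free list amounts to
 * redirecting either the previous item's next pointer or the list head, e.g.
 *
 *	if (prev)
 *		prev->xbfi_next = free->xbfi_next;
 *	else
 *		flist->xbf_first = free->xbfi_next;
 *	flist->xbf_count--;
 *	kmem_zone_free(xfs_bmap_free_item_zone, free);
 *
 * This is a sketch of the unlink step, not a substitute for the function.
 */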
215
216 /*
217  * Convert an extents-format file into a btree-format file.
218  * The new file will have a root block (in the inode) and a single child block.
219  */
220 STATIC int                                      /* error */
221 xfs_bmap_extents_to_btree(
222         xfs_trans_t             *tp,            /* transaction pointer */
223         xfs_inode_t             *ip,            /* incore inode pointer */
224         xfs_fsblock_t           *firstblock,    /* first-block-allocated */
225         xfs_bmap_free_t         *flist,         /* blocks freed in xaction */
226         xfs_btree_cur_t         **curp,         /* cursor returned to caller */
227         int                     wasdel,         /* converting a delayed alloc */
228         int                     *logflagsp,     /* inode logging flags */
229         int                     whichfork);     /* data or attr fork */
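
/*
 * For reference, the call site in xfs_bmap_add_extent() below shows the
 * typical trigger: once the fork holds more extents than fit inline
 * (if_ext_max), the extent list is converted to a btree:
 *
 *	error = xfs_bmap_extents_to_btree(ip->i_transp, ip, first,
 *		flist, &cur, da_old > 0, &tmp_logflags, whichfork);
 */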
230
231 /*
232  * Convert a local file to an extents file.
233  * This code is sort of bogus, since the file data needs to get
234  * logged so it won't be lost.  The bmap-level manipulations are ok, though.
235  */
236 STATIC int                              /* error */
237 xfs_bmap_local_to_extents(
238         xfs_trans_t     *tp,            /* transaction pointer */
239         xfs_inode_t     *ip,            /* incore inode pointer */
240         xfs_fsblock_t   *firstblock,    /* first block allocated in xaction */
241         xfs_extlen_t    total,          /* total blocks needed by transaction */
242         int             *logflagsp,     /* inode logging flags */
243         int             whichfork);     /* data or attr fork */
244
245 /*
246  * Search the extents list for the inode, for the extent containing bno.
247  * If bno lies in a hole, point to the next entry.  If bno lies past eof,
248  * *eofp will be set, and *prevp will contain the last entry (null if none).
249  * Else, *lastxp will be set to the index of the found
250  * entry; *gotp will contain the entry.
251  */
252 STATIC xfs_bmbt_rec_host_t *            /* pointer to found extent entry */
253 xfs_bmap_search_extents(
254         xfs_inode_t     *ip,            /* incore inode pointer */
255         xfs_fileoff_t   bno,            /* block number searched for */
256         int             whichfork,      /* data or attr fork */
257         int             *eofp,          /* out: end of file found */
258         xfs_extnum_t    *lastxp,        /* out: last extent index */
259         xfs_bmbt_irec_t *gotp,          /* out: extent entry found */
260         xfs_bmbt_irec_t *prevp);        /* out: previous extent entry found */
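
/*
 * Sketch of a typical call (the local variable names here are illustrative
 * only); callers such as xfs_bmapi() use this to find the extent covering a
 * file offset before deciding whether an allocation is needed:
 *
 *	xfs_bmbt_rec_host_t	*ep;
 *	xfs_bmbt_irec_t		got, prev;
 *	xfs_extnum_t		lastx;
 *	int			eof;
 *
 *	ep = xfs_bmap_search_extents(ip, bno, whichfork,
 *				     &eof, &lastx, &got, &prev);
 */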
261
262 /*
263  * Check the last inode extent to determine whether this allocation will result
264  * in blocks being allocated at the end of the file. When we allocate new data
265  * blocks at the end of the file which do not start at the previous data block,
266  * we will try to align the new blocks at stripe unit boundaries.
267  */
268 STATIC int                              /* error */
269 xfs_bmap_isaeof(
270         xfs_inode_t     *ip,            /* incore inode pointer */
271         xfs_fileoff_t   off,            /* file offset in fsblocks */
272         int             whichfork,      /* data or attribute fork */
273         char            *aeof);         /* return value */
274
275 /*
276  * Compute the worst-case number of indirect blocks that will be used
277  * for ip's delayed extent of length "len".
278  */
279 STATIC xfs_filblks_t
280 xfs_bmap_worst_indlen(
281         xfs_inode_t             *ip,    /* incore inode pointer */
282         xfs_filblks_t           len);   /* delayed extent length */
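
/*
 * Example of how this is used in xfs_bmap_add_extent_delay_real() below:
 * when a delayed extent is shortened, the leftover indirect-block
 * reservation is clamped to the worst case for the remaining length:
 *
 *	temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
 *		startblockval(PREV.br_startblock));
 *	xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
 */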
283
284 #ifdef DEBUG
285 /*
286  * Perform various validation checks on the values being returned
287  * from xfs_bmapi().
288  */
289 STATIC void
290 xfs_bmap_validate_ret(
291         xfs_fileoff_t           bno,
292         xfs_filblks_t           len,
293         int                     flags,
294         xfs_bmbt_irec_t         *mval,
295         int                     nmap,
296         int                     ret_nmap);
297 #else
298 #define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
299 #endif /* DEBUG */
300
301 STATIC int
302 xfs_bmap_count_tree(
303         xfs_mount_t     *mp,
304         xfs_trans_t     *tp,
305         xfs_ifork_t     *ifp,
306         xfs_fsblock_t   blockno,
307         int             levelin,
308         int             *count);
309
310 STATIC void
311 xfs_bmap_count_leaves(
312         xfs_ifork_t             *ifp,
313         xfs_extnum_t            idx,
314         int                     numrecs,
315         int                     *count);
316
317 STATIC void
318 xfs_bmap_disk_count_leaves(
319         struct xfs_mount        *mp,
320         struct xfs_btree_block  *block,
321         int                     numrecs,
322         int                     *count);
323
324 /*
325  * Bmap internal routines.
326  */
327
328 STATIC int                              /* error */
329 xfs_bmbt_lookup_eq(
330         struct xfs_btree_cur    *cur,
331         xfs_fileoff_t           off,
332         xfs_fsblock_t           bno,
333         xfs_filblks_t           len,
334         int                     *stat)  /* success/failure */
335 {
336         cur->bc_rec.b.br_startoff = off;
337         cur->bc_rec.b.br_startblock = bno;
338         cur->bc_rec.b.br_blockcount = len;
339         return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
340 }
341
342 STATIC int                              /* error */
343 xfs_bmbt_lookup_ge(
344         struct xfs_btree_cur    *cur,
345         xfs_fileoff_t           off,
346         xfs_fsblock_t           bno,
347         xfs_filblks_t           len,
348         int                     *stat)  /* success/failure */
349 {
350         cur->bc_rec.b.br_startoff = off;
351         cur->bc_rec.b.br_startblock = bno;
352         cur->bc_rec.b.br_blockcount = len;
353         return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
354 }
355
356 /*
357  * Update the record referred to by cur to the value given
358  * by [off, bno, len, state].
359  * This either works (return 0) or gets an EFSCORRUPTED error.
360  */
361 STATIC int
362 xfs_bmbt_update(
363         struct xfs_btree_cur    *cur,
364         xfs_fileoff_t           off,
365         xfs_fsblock_t           bno,
366         xfs_filblks_t           len,
367         xfs_exntst_t            state)
368 {
369         union xfs_btree_rec     rec;
370
371         xfs_bmbt_disk_set_allf(&rec.bmbt, off, bno, len, state);
372         return xfs_btree_update(cur, &rec);
373 }
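
/*
 * The lookup and update helpers above are typically used together; a
 * condensed sketch of the pattern used throughout this file (error handling
 * abbreviated, names as in xfs_bmap_add_extent_delay_real() below):
 *
 *	error = xfs_bmbt_lookup_eq(cur, LEFT.br_startoff,
 *			LEFT.br_startblock, LEFT.br_blockcount, &i);
 *	if (error)
 *		goto done;
 *	XFS_WANT_CORRUPTED_GOTO(i == 1, done);
 *	error = xfs_bmbt_update(cur, LEFT.br_startoff, LEFT.br_startblock,
 *			LEFT.br_blockcount + PREV.br_blockcount,
 *			LEFT.br_state);
 */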
374
375 /*
376  * Called from xfs_bmap_add_attrfork to handle btree format files.
377  */
378 STATIC int                                      /* error */
379 xfs_bmap_add_attrfork_btree(
380         xfs_trans_t             *tp,            /* transaction pointer */
381         xfs_inode_t             *ip,            /* incore inode pointer */
382         xfs_fsblock_t           *firstblock,    /* first block allocated */
383         xfs_bmap_free_t         *flist,         /* blocks to free at commit */
384         int                     *flags)         /* inode logging flags */
385 {
386         xfs_btree_cur_t         *cur;           /* btree cursor */
387         int                     error;          /* error return value */
388         xfs_mount_t             *mp;            /* file system mount struct */
389         int                     stat;           /* newroot status */
390
391         mp = ip->i_mount;
392         if (ip->i_df.if_broot_bytes <= XFS_IFORK_DSIZE(ip))
393                 *flags |= XFS_ILOG_DBROOT;
394         else {
395                 cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK);
396                 cur->bc_private.b.flist = flist;
397                 cur->bc_private.b.firstblock = *firstblock;
398                 if ((error = xfs_bmbt_lookup_ge(cur, 0, 0, 0, &stat)))
399                         goto error0;
400                 /* must be at least one entry */
401                 XFS_WANT_CORRUPTED_GOTO(stat == 1, error0);
402                 if ((error = xfs_btree_new_iroot(cur, flags, &stat)))
403                         goto error0;
404                 if (stat == 0) {
405                         xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
406                         return XFS_ERROR(ENOSPC);
407                 }
408                 *firstblock = cur->bc_private.b.firstblock;
409                 cur->bc_private.b.allocated = 0;
410                 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
411         }
412         return 0;
413 error0:
414         xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
415         return error;
416 }
417
418 /*
419  * Called from xfs_bmap_add_attrfork to handle extents format files.
420  */
421 STATIC int                                      /* error */
422 xfs_bmap_add_attrfork_extents(
423         xfs_trans_t             *tp,            /* transaction pointer */
424         xfs_inode_t             *ip,            /* incore inode pointer */
425         xfs_fsblock_t           *firstblock,    /* first block allocated */
426         xfs_bmap_free_t         *flist,         /* blocks to free at commit */
427         int                     *flags)         /* inode logging flags */
428 {
429         xfs_btree_cur_t         *cur;           /* bmap btree cursor */
430         int                     error;          /* error return value */
431
432         if (ip->i_d.di_nextents * sizeof(xfs_bmbt_rec_t) <= XFS_IFORK_DSIZE(ip))
433                 return 0;
434         cur = NULL;
435         error = xfs_bmap_extents_to_btree(tp, ip, firstblock, flist, &cur, 0,
436                 flags, XFS_DATA_FORK);
437         if (cur) {
438                 cur->bc_private.b.allocated = 0;
439                 xfs_btree_del_cursor(cur,
440                         error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
441         }
442         return error;
443 }
444
445 /*
446  * Called from xfs_bmap_add_attrfork to handle local format files.
447  */
448 STATIC int                                      /* error */
449 xfs_bmap_add_attrfork_local(
450         xfs_trans_t             *tp,            /* transaction pointer */
451         xfs_inode_t             *ip,            /* incore inode pointer */
452         xfs_fsblock_t           *firstblock,    /* first block allocated */
453         xfs_bmap_free_t         *flist,         /* blocks to free at commit */
454         int                     *flags)         /* inode logging flags */
455 {
456         xfs_da_args_t           dargs;          /* args for dir/attr code */
457         int                     error;          /* error return value */
458         xfs_mount_t             *mp;            /* mount structure pointer */
459
460         if (ip->i_df.if_bytes <= XFS_IFORK_DSIZE(ip))
461                 return 0;
462         if ((ip->i_d.di_mode & S_IFMT) == S_IFDIR) {
463                 mp = ip->i_mount;
464                 memset(&dargs, 0, sizeof(dargs));
465                 dargs.dp = ip;
466                 dargs.firstblock = firstblock;
467                 dargs.flist = flist;
468                 dargs.total = mp->m_dirblkfsbs;
469                 dargs.whichfork = XFS_DATA_FORK;
470                 dargs.trans = tp;
471                 error = xfs_dir2_sf_to_block(&dargs);
472         } else
473                 error = xfs_bmap_local_to_extents(tp, ip, firstblock, 1, flags,
474                         XFS_DATA_FORK);
475         return error;
476 }
477
478 /*
479  * Called by xfs_bmapi to update file extent records and the btree
480  * after allocating space (or doing a delayed allocation).
481  */
482 STATIC int                              /* error */
483 xfs_bmap_add_extent(
484         xfs_inode_t             *ip,    /* incore inode pointer */
485         xfs_extnum_t            idx,    /* extent number to update/insert */
486         xfs_btree_cur_t         **curp, /* if *curp is null, not a btree */
487         xfs_bmbt_irec_t         *new,   /* new data to add to file extents */
488         xfs_fsblock_t           *first, /* pointer to firstblock variable */
489         xfs_bmap_free_t         *flist, /* list of extents to be freed */
490         int                     *logflagsp, /* inode logging flags */
491         xfs_extdelta_t          *delta, /* Change made to incore extents */
492         int                     whichfork, /* data or attr fork */
493         int                     rsvd)   /* OK to use reserved data blocks */
494 {
495         xfs_btree_cur_t         *cur;   /* btree cursor or null */
496         xfs_filblks_t           da_new; /* new count del alloc blocks used */
497         xfs_filblks_t           da_old; /* old count del alloc blocks used */
498         int                     error;  /* error return value */
499         xfs_ifork_t             *ifp;   /* inode fork ptr */
500         int                     logflags; /* returned value */
501         xfs_extnum_t            nextents; /* number of extents in file now */
502
503         XFS_STATS_INC(xs_add_exlist);
504         cur = *curp;
505         ifp = XFS_IFORK_PTR(ip, whichfork);
506         nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
507         ASSERT(idx <= nextents);
508         da_old = da_new = 0;
509         error = 0;
510         /*
511          * This is the first extent added to a new/empty file.
512          * Special case this one, so other routines get to assume there are
513          * already extents in the list.
514          */
515         if (nextents == 0) {
516                 xfs_iext_insert(ip, 0, 1, new,
517                                 whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0);
518
519                 ASSERT(cur == NULL);
520                 ifp->if_lastex = 0;
521                 if (!isnullstartblock(new->br_startblock)) {
522                         XFS_IFORK_NEXT_SET(ip, whichfork, 1);
523                         logflags = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
524                 } else
525                         logflags = 0;
526                 /* DELTA: single new extent */
527                 if (delta) {
528                         if (delta->xed_startoff > new->br_startoff)
529                                 delta->xed_startoff = new->br_startoff;
530                         if (delta->xed_blockcount <
531                                         new->br_startoff + new->br_blockcount)
532                                 delta->xed_blockcount = new->br_startoff +
533                                                 new->br_blockcount;
534                 }
535         }
536         /*
537          * Any kind of new delayed allocation goes here.
538          */
539         else if (isnullstartblock(new->br_startblock)) {
540                 if (cur)
541                         ASSERT((cur->bc_private.b.flags &
542                                 XFS_BTCUR_BPRV_WASDEL) == 0);
543                 if ((error = xfs_bmap_add_extent_hole_delay(ip, idx, new,
544                                 &logflags, delta, rsvd)))
545                         goto done;
546         }
547         /*
548          * Real allocation off the end of the file.
549          */
550         else if (idx == nextents) {
551                 if (cur)
552                         ASSERT((cur->bc_private.b.flags &
553                                 XFS_BTCUR_BPRV_WASDEL) == 0);
554                 if ((error = xfs_bmap_add_extent_hole_real(ip, idx, cur, new,
555                                 &logflags, delta, whichfork)))
556                         goto done;
557         } else {
558                 xfs_bmbt_irec_t prev;   /* old extent at offset idx */
559
560                 /*
561                  * Get the record referred to by idx.
562                  */
563                 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx), &prev);
564                 /*
565                  * If it's a real allocation record, and the new allocation ends
566                  * after the start of the referred to record, then we're filling
567                  * in a delayed or unwritten allocation with a real one, or
568                  * converting real back to unwritten.
569                  */
570                 if (!isnullstartblock(new->br_startblock) &&
571                     new->br_startoff + new->br_blockcount > prev.br_startoff) {
572                         if (prev.br_state != XFS_EXT_UNWRITTEN &&
573                             isnullstartblock(prev.br_startblock)) {
574                                 da_old = startblockval(prev.br_startblock);
575                                 if (cur)
576                                         ASSERT(cur->bc_private.b.flags &
577                                                 XFS_BTCUR_BPRV_WASDEL);
578                                 if ((error = xfs_bmap_add_extent_delay_real(ip,
579                                         idx, &cur, new, &da_new, first, flist,
580                                         &logflags, delta, rsvd)))
581                                         goto done;
582                         } else if (new->br_state == XFS_EXT_NORM) {
583                                 ASSERT(new->br_state == XFS_EXT_NORM);
584                                 if ((error = xfs_bmap_add_extent_unwritten_real(
585                                         ip, idx, &cur, new, &logflags, delta)))
586                                         goto done;
587                         } else {
588                                 ASSERT(new->br_state == XFS_EXT_UNWRITTEN);
589                                 if ((error = xfs_bmap_add_extent_unwritten_real(
590                                         ip, idx, &cur, new, &logflags, delta)))
591                                         goto done;
592                         }
593                         ASSERT(*curp == cur || *curp == NULL);
594                 }
595                 /*
596                  * Otherwise we're filling in a hole with an allocation.
597                  */
598                 else {
599                         if (cur)
600                                 ASSERT((cur->bc_private.b.flags &
601                                         XFS_BTCUR_BPRV_WASDEL) == 0);
602                         if ((error = xfs_bmap_add_extent_hole_real(ip, idx, cur,
603                                         new, &logflags, delta, whichfork)))
604                                 goto done;
605                 }
606         }
607
608         ASSERT(*curp == cur || *curp == NULL);
609         /*
610          * Convert to a btree if necessary.
611          */
612         if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
613             XFS_IFORK_NEXTENTS(ip, whichfork) > ifp->if_ext_max) {
614                 int     tmp_logflags;   /* partial log flag return val */
615
616                 ASSERT(cur == NULL);
617                 error = xfs_bmap_extents_to_btree(ip->i_transp, ip, first,
618                         flist, &cur, da_old > 0, &tmp_logflags, whichfork);
619                 logflags |= tmp_logflags;
620                 if (error)
621                         goto done;
622         }
623         /*
624          * Adjust for changes in reserved delayed indirect blocks.
625          * Nothing to do for disk quotas here.
626          */
627         if (da_old || da_new) {
628                 xfs_filblks_t   nblks;
629
630                 nblks = da_new;
631                 if (cur)
632                         nblks += cur->bc_private.b.allocated;
633                 ASSERT(nblks <= da_old);
634                 if (nblks < da_old)
635                         xfs_mod_incore_sb(ip->i_mount, XFS_SBS_FDBLOCKS,
636                                 (int64_t)(da_old - nblks), rsvd);
637         }
638         /*
639          * Clear out the allocated field, done with it now in any case.
640          */
641         if (cur) {
642                 cur->bc_private.b.allocated = 0;
643                 *curp = cur;
644         }
645 done:
646 #ifdef DEBUG
647         if (!error)
648                 xfs_bmap_check_leaf_extents(*curp, ip, whichfork);
649 #endif
650         *logflagsp = logflags;
651         return error;
652 }
653
654 /*
655  * Called by xfs_bmap_add_extent to handle cases converting a delayed
656  * allocation to a real allocation.
657  */
658 STATIC int                              /* error */
659 xfs_bmap_add_extent_delay_real(
660         xfs_inode_t             *ip,    /* incore inode pointer */
661         xfs_extnum_t            idx,    /* extent number to update/insert */
662         xfs_btree_cur_t         **curp, /* if *curp is null, not a btree */
663         xfs_bmbt_irec_t         *new,   /* new data to add to file extents */
664         xfs_filblks_t           *dnew,  /* new delayed-alloc indirect blocks */
665         xfs_fsblock_t           *first, /* pointer to firstblock variable */
666         xfs_bmap_free_t         *flist, /* list of extents to be freed */
667         int                     *logflagsp, /* inode logging flags */
668         xfs_extdelta_t          *delta, /* Change made to incore extents */
669         int                     rsvd)   /* OK to use reserved data block allocation */
670 {
671         xfs_btree_cur_t         *cur;   /* btree cursor */
672         int                     diff;   /* temp value */
673         xfs_bmbt_rec_host_t     *ep;    /* extent entry for idx */
674         int                     error;  /* error return value */
675         int                     i;      /* temp state */
676         xfs_ifork_t             *ifp;   /* inode fork pointer */
677         xfs_fileoff_t           new_endoff;     /* end offset of new entry */
678         xfs_bmbt_irec_t         r[3];   /* neighbor extent entries */
679                                         /* left is 0, right is 1, prev is 2 */
680         int                     rval=0; /* return value (logging flags) */
681         int                     state = 0;/* state bits, accessed thru macros */
682         xfs_filblks_t           temp=0; /* value for dnew calculations */
683         xfs_filblks_t           temp2=0;/* value for dnew calculations */
684         int                     tmp_rval;       /* partial logging flags */
685
686 #define LEFT            r[0]
687 #define RIGHT           r[1]
688 #define PREV            r[2]
689
690         /*
691          * Set up a bunch of variables to make the tests simpler.
692          */
693         cur = *curp;
694         ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
695         ep = xfs_iext_get_ext(ifp, idx);
696         xfs_bmbt_get_all(ep, &PREV);
697         new_endoff = new->br_startoff + new->br_blockcount;
698         ASSERT(PREV.br_startoff <= new->br_startoff);
699         ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);
700
701         /*
702          * Set flags determining what part of the previous delayed allocation
703          * extent is being replaced by a real allocation.
704          */
705         if (PREV.br_startoff == new->br_startoff)
706                 state |= BMAP_LEFT_FILLING;
707         if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
708                 state |= BMAP_RIGHT_FILLING;
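
	/*
	 * A worked example with made-up offsets: if PREV covers file blocks
	 * [100, 110), then new == [100, 110) sets both FILLING bits (the
	 * whole delayed extent is converted), new == [100, 104) sets only
	 * BMAP_LEFT_FILLING, new == [106, 110) sets only BMAP_RIGHT_FILLING,
	 * and new == [103, 107) sets neither, so the switch below splits
	 * PREV into three pieces (case 0).
	 */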
709
710         /*
711          * Check and set flags if this segment has a left neighbor.
712          * Don't set contiguous if the combined extent would be too large.
713          */
714         if (idx > 0) {
715                 state |= BMAP_LEFT_VALID;
716                 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx - 1), &LEFT);
717
718                 if (isnullstartblock(LEFT.br_startblock))
719                         state |= BMAP_LEFT_DELAY;
720         }
721
722         if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
723             LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
724             LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
725             LEFT.br_state == new->br_state &&
726             LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN)
727                 state |= BMAP_LEFT_CONTIG;
728
729         /*
730          * Check and set flags if this segment has a right neighbor.
731          * Don't set contiguous if the combined extent would be too large.
732          * Also check for all-three-contiguous being too large.
733          */
734         if (idx < ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1) {
735                 state |= BMAP_RIGHT_VALID;
736                 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx + 1), &RIGHT);
737
738                 if (isnullstartblock(RIGHT.br_startblock))
739                         state |= BMAP_RIGHT_DELAY;
740         }
741
742         if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
743             new_endoff == RIGHT.br_startoff &&
744             new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
745             new->br_state == RIGHT.br_state &&
746             new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
747             ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
748                        BMAP_RIGHT_FILLING)) !=
749                       (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
750                        BMAP_RIGHT_FILLING) ||
751              LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
752                         <= MAXEXTLEN))
753                 state |= BMAP_RIGHT_CONTIG;
754
755         error = 0;
756         /*
757          * Switch out based on the FILLING and CONTIG state bits.
758          */
759         switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
760                          BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
761         case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
762              BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
763                 /*
764                  * Filling in all of a previously delayed allocation extent.
765                  * The left and right neighbors are both contiguous with new.
766                  */
767                 trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_);
768                 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
769                         LEFT.br_blockcount + PREV.br_blockcount +
770                         RIGHT.br_blockcount);
771                 trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_);
772
773                 xfs_iext_remove(ip, idx, 2, state);
774                 ip->i_df.if_lastex = idx - 1;
775                 ip->i_d.di_nextents--;
776                 if (cur == NULL)
777                         rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
778                 else {
779                         rval = XFS_ILOG_CORE;
780                         if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff,
781                                         RIGHT.br_startblock,
782                                         RIGHT.br_blockcount, &i)))
783                                 goto done;
784                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
785                         if ((error = xfs_btree_delete(cur, &i)))
786                                 goto done;
787                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
788                         if ((error = xfs_btree_decrement(cur, 0, &i)))
789                                 goto done;
790                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
791                         if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
792                                         LEFT.br_startblock,
793                                         LEFT.br_blockcount +
794                                         PREV.br_blockcount +
795                                         RIGHT.br_blockcount, LEFT.br_state)))
796                                 goto done;
797                 }
798                 *dnew = 0;
799                 /* DELTA: Three in-core extents are replaced by one. */
800                 temp = LEFT.br_startoff;
801                 temp2 = LEFT.br_blockcount +
802                         PREV.br_blockcount +
803                         RIGHT.br_blockcount;
804                 break;
805
806         case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
807                 /*
808                  * Filling in all of a previously delayed allocation extent.
809                  * The left neighbor is contiguous, the right is not.
810                  */
811                 trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_);
812                 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
813                         LEFT.br_blockcount + PREV.br_blockcount);
814                 trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_);
815
816                 ip->i_df.if_lastex = idx - 1;
817                 xfs_iext_remove(ip, idx, 1, state);
818                 if (cur == NULL)
819                         rval = XFS_ILOG_DEXT;
820                 else {
821                         rval = 0;
822                         if ((error = xfs_bmbt_lookup_eq(cur, LEFT.br_startoff,
823                                         LEFT.br_startblock, LEFT.br_blockcount,
824                                         &i)))
825                                 goto done;
826                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
827                         if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
828                                         LEFT.br_startblock,
829                                         LEFT.br_blockcount +
830                                         PREV.br_blockcount, LEFT.br_state)))
831                                 goto done;
832                 }
833                 *dnew = 0;
834                 /* DELTA: Two in-core extents are replaced by one. */
835                 temp = LEFT.br_startoff;
836                 temp2 = LEFT.br_blockcount +
837                         PREV.br_blockcount;
838                 break;
839
840         case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
841                 /*
842                  * Filling in all of a previously delayed allocation extent.
843                  * The right neighbor is contiguous, the left is not.
844                  */
845                 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
846                 xfs_bmbt_set_startblock(ep, new->br_startblock);
847                 xfs_bmbt_set_blockcount(ep,
848                         PREV.br_blockcount + RIGHT.br_blockcount);
849                 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
850
851                 ip->i_df.if_lastex = idx;
852                 xfs_iext_remove(ip, idx + 1, 1, state);
853                 if (cur == NULL)
854                         rval = XFS_ILOG_DEXT;
855                 else {
856                         rval = 0;
857                         if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff,
858                                         RIGHT.br_startblock,
859                                         RIGHT.br_blockcount, &i)))
860                                 goto done;
861                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
862                         if ((error = xfs_bmbt_update(cur, PREV.br_startoff,
863                                         new->br_startblock,
864                                         PREV.br_blockcount +
865                                         RIGHT.br_blockcount, PREV.br_state)))
866                                 goto done;
867                 }
868                 *dnew = 0;
869                 /* DELTA: Two in-core extents are replaced by one. */
870                 temp = PREV.br_startoff;
871                 temp2 = PREV.br_blockcount +
872                         RIGHT.br_blockcount;
873                 break;
874
875         case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
876                 /*
877                  * Filling in all of a previously delayed allocation extent.
878                  * Neither the left nor right neighbors are contiguous with
879                  * the new one.
880                  */
881                 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
882                 xfs_bmbt_set_startblock(ep, new->br_startblock);
883                 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
884
885                 ip->i_df.if_lastex = idx;
886                 ip->i_d.di_nextents++;
887                 if (cur == NULL)
888                         rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
889                 else {
890                         rval = XFS_ILOG_CORE;
891                         if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
892                                         new->br_startblock, new->br_blockcount,
893                                         &i)))
894                                 goto done;
895                         XFS_WANT_CORRUPTED_GOTO(i == 0, done);
896                         cur->bc_rec.b.br_state = XFS_EXT_NORM;
897                         if ((error = xfs_btree_insert(cur, &i)))
898                                 goto done;
899                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
900                 }
901                 *dnew = 0;
902                 /* DELTA: The in-core extent described by new changed type. */
903                 temp = new->br_startoff;
904                 temp2 = new->br_blockcount;
905                 break;
906
907         case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
908                 /*
909                  * Filling in the first part of a previous delayed allocation.
910                  * The left neighbor is contiguous.
911                  */
912                 trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_);
913                 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
914                         LEFT.br_blockcount + new->br_blockcount);
915                 xfs_bmbt_set_startoff(ep,
916                         PREV.br_startoff + new->br_blockcount);
917                 trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_);
918
919                 temp = PREV.br_blockcount - new->br_blockcount;
920                 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
921                 xfs_bmbt_set_blockcount(ep, temp);
922                 ip->i_df.if_lastex = idx - 1;
923                 if (cur == NULL)
924                         rval = XFS_ILOG_DEXT;
925                 else {
926                         rval = 0;
927                         if ((error = xfs_bmbt_lookup_eq(cur, LEFT.br_startoff,
928                                         LEFT.br_startblock, LEFT.br_blockcount,
929                                         &i)))
930                                 goto done;
931                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
932                         if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
933                                         LEFT.br_startblock,
934                                         LEFT.br_blockcount +
935                                         new->br_blockcount,
936                                         LEFT.br_state)))
937                                 goto done;
938                 }
939                 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
940                         startblockval(PREV.br_startblock));
941                 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
942                 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
943                 *dnew = temp;
944                 /* DELTA: The boundary between two in-core extents moved. */
945                 temp = LEFT.br_startoff;
946                 temp2 = LEFT.br_blockcount +
947                         PREV.br_blockcount;
948                 break;
949
950         case BMAP_LEFT_FILLING:
951                 /*
952                  * Filling in the first part of a previous delayed allocation.
953                  * The left neighbor is not contiguous.
954                  */
955                 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
956                 xfs_bmbt_set_startoff(ep, new_endoff);
957                 temp = PREV.br_blockcount - new->br_blockcount;
958                 xfs_bmbt_set_blockcount(ep, temp);
959                 xfs_iext_insert(ip, idx, 1, new, state);
960                 ip->i_df.if_lastex = idx;
961                 ip->i_d.di_nextents++;
962                 if (cur == NULL)
963                         rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
964                 else {
965                         rval = XFS_ILOG_CORE;
966                         if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
967                                         new->br_startblock, new->br_blockcount,
968                                         &i)))
969                                 goto done;
970                         XFS_WANT_CORRUPTED_GOTO(i == 0, done);
971                         cur->bc_rec.b.br_state = XFS_EXT_NORM;
972                         if ((error = xfs_btree_insert(cur, &i)))
973                                 goto done;
974                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
975                 }
976                 if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
977                     ip->i_d.di_nextents > ip->i_df.if_ext_max) {
978                         error = xfs_bmap_extents_to_btree(ip->i_transp, ip,
979                                         first, flist, &cur, 1, &tmp_rval,
980                                         XFS_DATA_FORK);
981                         rval |= tmp_rval;
982                         if (error)
983                                 goto done;
984                 }
985                 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
986                         startblockval(PREV.br_startblock) -
987                         (cur ? cur->bc_private.b.allocated : 0));
988                 ep = xfs_iext_get_ext(ifp, idx + 1);
989                 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
990                 trace_xfs_bmap_post_update(ip, idx + 1, state, _THIS_IP_);
991                 *dnew = temp;
992                 /* DELTA: One in-core extent is split in two. */
993                 temp = PREV.br_startoff;
994                 temp2 = PREV.br_blockcount;
995                 break;
996
997         case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
998                 /*
999                  * Filling in the last part of a previous delayed allocation.
1000                  * The right neighbor is contiguous with the new allocation.
1001                  */
1002                 temp = PREV.br_blockcount - new->br_blockcount;
1003                 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
1004                 trace_xfs_bmap_pre_update(ip, idx + 1, state, _THIS_IP_);
1005                 xfs_bmbt_set_blockcount(ep, temp);
1006                 xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, idx + 1),
1007                         new->br_startoff, new->br_startblock,
1008                         new->br_blockcount + RIGHT.br_blockcount,
1009                         RIGHT.br_state);
1010                 trace_xfs_bmap_post_update(ip, idx + 1, state, _THIS_IP_);
1011                 ip->i_df.if_lastex = idx + 1;
1012                 if (cur == NULL)
1013                         rval = XFS_ILOG_DEXT;
1014                 else {
1015                         rval = 0;
1016                         if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff,
1017                                         RIGHT.br_startblock,
1018                                         RIGHT.br_blockcount, &i)))
1019                                 goto done;
1020                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1021                         if ((error = xfs_bmbt_update(cur, new->br_startoff,
1022                                         new->br_startblock,
1023                                         new->br_blockcount +
1024                                         RIGHT.br_blockcount,
1025                                         RIGHT.br_state)))
1026                                 goto done;
1027                 }
1028                 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
1029                         startblockval(PREV.br_startblock));
1030                 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
1031                 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
1032                 *dnew = temp;
1033                 /* DELTA: The boundary between two in-core extents moved. */
1034                 temp = PREV.br_startoff;
1035                 temp2 = PREV.br_blockcount +
1036                         RIGHT.br_blockcount;
1037                 break;
1038
1039         case BMAP_RIGHT_FILLING:
1040                 /*
1041                  * Filling in the last part of a previous delayed allocation.
1042                  * The right neighbor is not contiguous.
1043                  */
1044                 temp = PREV.br_blockcount - new->br_blockcount;
1045                 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
1046                 xfs_bmbt_set_blockcount(ep, temp);
1047                 xfs_iext_insert(ip, idx + 1, 1, new, state);
1048                 ip->i_df.if_lastex = idx + 1;
1049                 ip->i_d.di_nextents++;
1050                 if (cur == NULL)
1051                         rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1052                 else {
1053                         rval = XFS_ILOG_CORE;
1054                         if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
1055                                         new->br_startblock, new->br_blockcount,
1056                                         &i)))
1057                                 goto done;
1058                         XFS_WANT_CORRUPTED_GOTO(i == 0, done);
1059                         cur->bc_rec.b.br_state = XFS_EXT_NORM;
1060                         if ((error = xfs_btree_insert(cur, &i)))
1061                                 goto done;
1062                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1063                 }
1064                 if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
1065                     ip->i_d.di_nextents > ip->i_df.if_ext_max) {
1066                         error = xfs_bmap_extents_to_btree(ip->i_transp, ip,
1067                                 first, flist, &cur, 1, &tmp_rval,
1068                                 XFS_DATA_FORK);
1069                         rval |= tmp_rval;
1070                         if (error)
1071                                 goto done;
1072                 }
1073                 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
1074                         startblockval(PREV.br_startblock) -
1075                         (cur ? cur->bc_private.b.allocated : 0));
1076                 ep = xfs_iext_get_ext(ifp, idx);
1077                 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
1078                 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
1079                 *dnew = temp;
1080                 /* DELTA: One in-core extent is split in two. */
1081                 temp = PREV.br_startoff;
1082                 temp2 = PREV.br_blockcount;
1083                 break;
1084
1085         case 0:
1086                 /*
1087                  * Filling in the middle part of a previous delayed allocation.
1088                  * Contiguity is impossible here.
1089                  * This case is avoided almost all the time.
1090                  */
1091                 temp = new->br_startoff - PREV.br_startoff;
1092                 trace_xfs_bmap_pre_update(ip, idx, 0, _THIS_IP_);
1093                 xfs_bmbt_set_blockcount(ep, temp);
1094                 r[0] = *new;
1095                 r[1].br_state = PREV.br_state;
1096                 r[1].br_startblock = 0;
1097                 r[1].br_startoff = new_endoff;
1098                 temp2 = PREV.br_startoff + PREV.br_blockcount - new_endoff;
1099                 r[1].br_blockcount = temp2;
1100                 xfs_iext_insert(ip, idx + 1, 2, &r[0], state);
1101                 ip->i_df.if_lastex = idx + 1;
1102                 ip->i_d.di_nextents++;
1103                 if (cur == NULL)
1104                         rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1105                 else {
1106                         rval = XFS_ILOG_CORE;
1107                         if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
1108                                         new->br_startblock, new->br_blockcount,
1109                                         &i)))
1110                                 goto done;
1111                         XFS_WANT_CORRUPTED_GOTO(i == 0, done);
1112                         cur->bc_rec.b.br_state = XFS_EXT_NORM;
1113                         if ((error = xfs_btree_insert(cur, &i)))
1114                                 goto done;
1115                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1116                 }
1117                 if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
1118                     ip->i_d.di_nextents > ip->i_df.if_ext_max) {
1119                         error = xfs_bmap_extents_to_btree(ip->i_transp, ip,
1120                                         first, flist, &cur, 1, &tmp_rval,
1121                                         XFS_DATA_FORK);
1122                         rval |= tmp_rval;
1123                         if (error)
1124                                 goto done;
1125                 }
1126                 temp = xfs_bmap_worst_indlen(ip, temp);
1127                 temp2 = xfs_bmap_worst_indlen(ip, temp2);
1128                 diff = (int)(temp + temp2 - startblockval(PREV.br_startblock) -
1129                         (cur ? cur->bc_private.b.allocated : 0));
1130                 if (diff > 0 &&
1131                     xfs_mod_incore_sb(ip->i_mount, XFS_SBS_FDBLOCKS, -((int64_t)diff), rsvd)) {
1132                         /*
1133                          * Ick gross gag me with a spoon.
1134                          */
1135                         ASSERT(0);      /* want to see if this ever happens! */
1136                         while (diff > 0) {
1137                                 if (temp) {
1138                                         temp--;
1139                                         diff--;
1140                                         if (!diff ||
1141                                             !xfs_mod_incore_sb(ip->i_mount,
1142                                                     XFS_SBS_FDBLOCKS, -((int64_t)diff), rsvd))
1143                                                 break;
1144                                 }
1145                                 if (temp2) {
1146                                         temp2--;
1147                                         diff--;
1148                                         if (!diff ||
1149                                             !xfs_mod_incore_sb(ip->i_mount,
1150                                                     XFS_SBS_FDBLOCKS, -((int64_t)diff), rsvd))
1151                                                 break;
1152                                 }
1153                         }
1154                 }
1155                 ep = xfs_iext_get_ext(ifp, idx);
1156                 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
1157                 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
1158                 trace_xfs_bmap_pre_update(ip, idx + 2, state, _THIS_IP_);
1159                 xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, idx + 2),
1160                         nullstartblock((int)temp2));
1161                 trace_xfs_bmap_post_update(ip, idx + 2, state, _THIS_IP_);
1162                 *dnew = temp + temp2;
1163                 /* DELTA: One in-core extent is split in three. */
1164                 temp = PREV.br_startoff;
1165                 temp2 = PREV.br_blockcount;
1166                 break;
1167
1168         case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
1169         case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
1170         case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
1171         case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
1172         case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
1173         case BMAP_LEFT_CONTIG:
1174         case BMAP_RIGHT_CONTIG:
1175                 /*
1176                  * These cases are all impossible.
1177                  */
1178                 ASSERT(0);
1179         }
1180         *curp = cur;
1181         if (delta) {
1182                 temp2 += temp;
1183                 if (delta->xed_startoff > temp)
1184                         delta->xed_startoff = temp;
1185                 if (delta->xed_blockcount < temp2)
1186                         delta->xed_blockcount = temp2;
1187         }
1188 done:
1189         *logflagsp = rval;
1190         return error;
1191 #undef  LEFT
1192 #undef  RIGHT
1193 #undef  PREV
1194 }
1195
1196 /*
1197  * Called by xfs_bmap_add_extent to handle cases converting an unwritten
1198  * allocation to a real allocation or vice versa.
1199  */
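/*
 * The extent at "idx" (PREV) must wholly contain the range described by
 * "new"; the FILLING/CONTIG state bits computed below classify how much of
 * PREV changes state and whether the result can be merged with the left
 * and/or right neighbor, which selects the case handled in the switch.
 */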
1200 STATIC int                              /* error */
1201 xfs_bmap_add_extent_unwritten_real(
1202         xfs_inode_t             *ip,    /* incore inode pointer */
1203         xfs_extnum_t            idx,    /* extent number to update/insert */
1204         xfs_btree_cur_t         **curp, /* if *curp is null, not a btree */
1205         xfs_bmbt_irec_t         *new,   /* new data to add to file extents */
1206         int                     *logflagsp, /* inode logging flags */
1207         xfs_extdelta_t          *delta) /* Change made to incore extents */
1208 {
1209         xfs_btree_cur_t         *cur;   /* btree cursor */
1210         xfs_bmbt_rec_host_t     *ep;    /* extent entry for idx */
1211         int                     error;  /* error return value */
1212         int                     i;      /* temp state */
1213         xfs_ifork_t             *ifp;   /* inode fork pointer */
1214         xfs_fileoff_t           new_endoff;     /* end offset of new entry */
1215         xfs_exntst_t            newext; /* new extent state */
1216         xfs_exntst_t            oldext; /* old extent state */
1217         xfs_bmbt_irec_t         r[3];   /* neighbor extent entries */
1218                                         /* left is 0, right is 1, prev is 2 */
1219         int                     rval=0; /* return value (logging flags) */
1220         int                     state = 0;/* state bits, accessed thru macros */
1221         xfs_filblks_t           temp=0;
1222         xfs_filblks_t           temp2=0;
1223
1224 #define LEFT            r[0]
1225 #define RIGHT           r[1]
1226 #define PREV            r[2]
1227         /*
1228          * Set up a bunch of variables to make the tests simpler.
1229          */
1230         error = 0;
1231         cur = *curp;
1232         ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
1233         ep = xfs_iext_get_ext(ifp, idx);
1234         xfs_bmbt_get_all(ep, &PREV);
1235         newext = new->br_state;
1236         oldext = (newext == XFS_EXT_UNWRITTEN) ?
1237                 XFS_EXT_NORM : XFS_EXT_UNWRITTEN;
1238         ASSERT(PREV.br_state == oldext);
1239         new_endoff = new->br_startoff + new->br_blockcount;
1240         ASSERT(PREV.br_startoff <= new->br_startoff);
1241         ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);
1242
1243         /*
1244          * Set flags determining what part of the previous oldext allocation
1245          * extent is being replaced by a newext allocation.
1246          */
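        /*
         * Example (illustrative): if PREV covers blocks [100,108) and new
         * covers [100,104), only BMAP_LEFT_FILLING is set and the trailing
         * half of PREV keeps the old state; if new covers all of
         * [100,108), both filling bits are set and PREV changes state as a
         * whole.
         */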
1247         if (PREV.br_startoff == new->br_startoff)
1248                 state |= BMAP_LEFT_FILLING;
1249         if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
1250                 state |= BMAP_RIGHT_FILLING;
1251
1252         /*
1253          * Check and set flags if this segment has a left neighbor.
1254          * Don't set contiguous if the combined extent would be too large.
1255          */
1256         if (idx > 0) {
1257                 state |= BMAP_LEFT_VALID;
1258                 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx - 1), &LEFT);
1259
1260                 if (isnullstartblock(LEFT.br_startblock))
1261                         state |= BMAP_LEFT_DELAY;
1262         }
1263
1264         if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
1265             LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
1266             LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
1267             LEFT.br_state == newext &&
1268             LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN)
1269                 state |= BMAP_LEFT_CONTIG;
1270
1271         /*
1272          * Check and set flags if this segment has a right neighbor.
1273          * Don't set contiguous if the combined extent would be too large.
1274          * Also check for all-three-contiguous being too large.
1275          */
1276         if (idx < ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1) {
1277                 state |= BMAP_RIGHT_VALID;
1278                 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx + 1), &RIGHT);
1279                 if (isnullstartblock(RIGHT.br_startblock))
1280                         state |= BMAP_RIGHT_DELAY;
1281         }
1282
1283         if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
1284             new_endoff == RIGHT.br_startoff &&
1285             new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
1286             newext == RIGHT.br_state &&
1287             new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
1288             ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
1289                        BMAP_RIGHT_FILLING)) !=
1290                       (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
1291                        BMAP_RIGHT_FILLING) ||
1292              LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
1293                         <= MAXEXTLEN))
1294                 state |= BMAP_RIGHT_CONTIG;
1295
1296         /*
1297          * Switch out based on the FILLING and CONTIG state bits.
1298          */
1299         switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
1300                          BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
1301         case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
1302              BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
1303                 /*
1304                  * Setting all of a previous oldext extent to newext.
1305                  * The left and right neighbors are both contiguous with new.
1306                  */
1307                 trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_);
1308                 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
1309                         LEFT.br_blockcount + PREV.br_blockcount +
1310                         RIGHT.br_blockcount);
1311                 trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_);
1312
1313                 xfs_iext_remove(ip, idx, 2, state);
1314                 ip->i_df.if_lastex = idx - 1;
1315                 ip->i_d.di_nextents -= 2;
1316                 if (cur == NULL)
1317                         rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1318                 else {
1319                         rval = XFS_ILOG_CORE;
1320                         if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff,
1321                                         RIGHT.br_startblock,
1322                                         RIGHT.br_blockcount, &i)))
1323                                 goto done;
1324                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1325                         if ((error = xfs_btree_delete(cur, &i)))
1326                                 goto done;
1327                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1328                         if ((error = xfs_btree_decrement(cur, 0, &i)))
1329                                 goto done;
1330                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1331                         if ((error = xfs_btree_delete(cur, &i)))
1332                                 goto done;
1333                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1334                         if ((error = xfs_btree_decrement(cur, 0, &i)))
1335                                 goto done;
1336                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1337                         if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
1338                                 LEFT.br_startblock,
1339                                 LEFT.br_blockcount + PREV.br_blockcount +
1340                                 RIGHT.br_blockcount, LEFT.br_state)))
1341                                 goto done;
1342                 }
1343                 /* DELTA: Three in-core extents are replaced by one. */
1344                 temp = LEFT.br_startoff;
1345                 temp2 = LEFT.br_blockcount +
1346                         PREV.br_blockcount +
1347                         RIGHT.br_blockcount;
1348                 break;
1349
1350         case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
1351                 /*
1352                  * Setting all of a previous oldext extent to newext.
1353                  * The left neighbor is contiguous, the right is not.
1354                  */
1355                 trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_);
1356                 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
1357                         LEFT.br_blockcount + PREV.br_blockcount);
1358                 trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_);
1359
1360                 ip->i_df.if_lastex = idx - 1;
1361                 xfs_iext_remove(ip, idx, 1, state);
1362                 ip->i_d.di_nextents--;
1363                 if (cur == NULL)
1364                         rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1365                 else {
1366                         rval = XFS_ILOG_CORE;
1367                         if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
1368                                         PREV.br_startblock, PREV.br_blockcount,
1369                                         &i)))
1370                                 goto done;
1371                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1372                         if ((error = xfs_btree_delete(cur, &i)))
1373                                 goto done;
1374                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1375                         if ((error = xfs_btree_decrement(cur, 0, &i)))
1376                                 goto done;
1377                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1378                         if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
1379                                 LEFT.br_startblock,
1380                                 LEFT.br_blockcount + PREV.br_blockcount,
1381                                 LEFT.br_state)))
1382                                 goto done;
1383                 }
1384                 /* DELTA: Two in-core extents are replaced by one. */
1385                 temp = LEFT.br_startoff;
1386                 temp2 = LEFT.br_blockcount +
1387                         PREV.br_blockcount;
1388                 break;
1389
1390         case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
1391                 /*
1392                  * Setting all of a previous oldext extent to newext.
1393                  * The right neighbor is contiguous, the left is not.
1394                  */
1395                 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
1396                 xfs_bmbt_set_blockcount(ep,
1397                         PREV.br_blockcount + RIGHT.br_blockcount);
1398                 xfs_bmbt_set_state(ep, newext);
1399                 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
1400                 ip->i_df.if_lastex = idx;
1401                 xfs_iext_remove(ip, idx + 1, 1, state);
1402                 ip->i_d.di_nextents--;
1403                 if (cur == NULL)
1404                         rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1405                 else {
1406                         rval = XFS_ILOG_CORE;
1407                         if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff,
1408                                         RIGHT.br_startblock,
1409                                         RIGHT.br_blockcount, &i)))
1410                                 goto done;
1411                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1412                         if ((error = xfs_btree_delete(cur, &i)))
1413                                 goto done;
1414                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1415                         if ((error = xfs_btree_decrement(cur, 0, &i)))
1416                                 goto done;
1417                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1418                         if ((error = xfs_bmbt_update(cur, new->br_startoff,
1419                                 new->br_startblock,
1420                                 new->br_blockcount + RIGHT.br_blockcount,
1421                                 newext)))
1422                                 goto done;
1423                 }
1424                 /* DELTA: Two in-core extents are replaced by one. */
1425                 temp = PREV.br_startoff;
1426                 temp2 = PREV.br_blockcount +
1427                         RIGHT.br_blockcount;
1428                 break;
1429
1430         case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
1431                 /*
1432                  * Setting all of a previous oldext extent to newext.
1433                  * Neither the left nor right neighbors are contiguous with
1434                  * the new one.
1435                  */
1436                 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
1437                 xfs_bmbt_set_state(ep, newext);
1438                 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
1439
1440                 ip->i_df.if_lastex = idx;
1441                 if (cur == NULL)
1442                         rval = XFS_ILOG_DEXT;
1443                 else {
1444                         rval = 0;
1445                         if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
1446                                         new->br_startblock, new->br_blockcount,
1447                                         &i)))
1448                                 goto done;
1449                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1450                         if ((error = xfs_bmbt_update(cur, new->br_startoff,
1451                                 new->br_startblock, new->br_blockcount,
1452                                 newext)))
1453                                 goto done;
1454                 }
1455                 /* DELTA: The in-core extent described by new changed type. */
1456                 temp = new->br_startoff;
1457                 temp2 = new->br_blockcount;
1458                 break;
1459
1460         case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
1461                 /*
1462                  * Setting the first part of a previous oldext extent to newext.
1463                  * The left neighbor is contiguous.
1464                  */
1465                 trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_);
1466                 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
1467                         LEFT.br_blockcount + new->br_blockcount);
1468                 xfs_bmbt_set_startoff(ep,
1469                         PREV.br_startoff + new->br_blockcount);
1470                 trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_);
1471
1472                 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
1473                 xfs_bmbt_set_startblock(ep,
1474                         new->br_startblock + new->br_blockcount);
1475                 xfs_bmbt_set_blockcount(ep,
1476                         PREV.br_blockcount - new->br_blockcount);
1477                 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
1478
1479                 ip->i_df.if_lastex = idx - 1;
1480                 if (cur == NULL)
1481                         rval = XFS_ILOG_DEXT;
1482                 else {
1483                         rval = 0;
1484                         if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
1485                                         PREV.br_startblock, PREV.br_blockcount,
1486                                         &i)))
1487                                 goto done;
1488                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1489                         if ((error = xfs_bmbt_update(cur,
1490                                 PREV.br_startoff + new->br_blockcount,
1491                                 PREV.br_startblock + new->br_blockcount,
1492                                 PREV.br_blockcount - new->br_blockcount,
1493                                 oldext)))
1494                                 goto done;
1495                         if ((error = xfs_btree_decrement(cur, 0, &i)))
1496                                 goto done;
1497                         if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
1498                                 LEFT.br_startblock,
1499                                 LEFT.br_blockcount + new->br_blockcount,
1500                                 LEFT.br_state)))
1501                                 goto done;
1502                 }
1503                 /* DELTA: The boundary between two in-core extents moved. */
1504                 temp = LEFT.br_startoff;
1505                 temp2 = LEFT.br_blockcount +
1506                         PREV.br_blockcount;
1507                 break;
1508
1509         case BMAP_LEFT_FILLING:
1510                 /*
1511                  * Setting the first part of a previous oldext extent to newext.
1512                  * The left neighbor is not contiguous.
1513                  */
1514                 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
1515                 ASSERT(ep && xfs_bmbt_get_state(ep) == oldext);
1516                 xfs_bmbt_set_startoff(ep, new_endoff);
1517                 xfs_bmbt_set_blockcount(ep,
1518                         PREV.br_blockcount - new->br_blockcount);
1519                 xfs_bmbt_set_startblock(ep,
1520                         new->br_startblock + new->br_blockcount);
1521                 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
1522
1523                 xfs_iext_insert(ip, idx, 1, new, state);
1524                 ip->i_df.if_lastex = idx;
1525                 ip->i_d.di_nextents++;
1526                 if (cur == NULL)
1527                         rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1528                 else {
1529                         rval = XFS_ILOG_CORE;
1530                         if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
1531                                         PREV.br_startblock, PREV.br_blockcount,
1532                                         &i)))
1533                                 goto done;
1534                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1535                         if ((error = xfs_bmbt_update(cur,
1536                                 PREV.br_startoff + new->br_blockcount,
1537                                 PREV.br_startblock + new->br_blockcount,
1538                                 PREV.br_blockcount - new->br_blockcount,
1539                                 oldext)))
1540                                 goto done;
1541                         cur->bc_rec.b = *new;
1542                         if ((error = xfs_btree_insert(cur, &i)))
1543                                 goto done;
1544                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1545                 }
1546                 /* DELTA: One in-core extent is split in two. */
1547                 temp = PREV.br_startoff;
1548                 temp2 = PREV.br_blockcount;
1549                 break;
1550
1551         case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
1552                 /*
1553                  * Setting the last part of a previous oldext extent to newext.
1554                  * The right neighbor is contiguous with the new allocation.
1555                  */
1556                 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
1557                 trace_xfs_bmap_pre_update(ip, idx + 1, state, _THIS_IP_);
1558                 xfs_bmbt_set_blockcount(ep,
1559                         PREV.br_blockcount - new->br_blockcount);
1560                 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
1561                 xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, idx + 1),
1562                         new->br_startoff, new->br_startblock,
1563                         new->br_blockcount + RIGHT.br_blockcount, newext);
1564                 trace_xfs_bmap_post_update(ip, idx + 1, state, _THIS_IP_);
1565
1566                 ip->i_df.if_lastex = idx + 1;
1567                 if (cur == NULL)
1568                         rval = XFS_ILOG_DEXT;
1569                 else {
1570                         rval = 0;
1571                         if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
1572                                         PREV.br_startblock,
1573                                         PREV.br_blockcount, &i)))
1574                                 goto done;
1575                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1576                         if ((error = xfs_bmbt_update(cur, PREV.br_startoff,
1577                                 PREV.br_startblock,
1578                                 PREV.br_blockcount - new->br_blockcount,
1579                                 oldext)))
1580                                 goto done;
1581                         if ((error = xfs_btree_increment(cur, 0, &i)))
1582                                 goto done;
1583                         if ((error = xfs_bmbt_update(cur, new->br_startoff,
1584                                 new->br_startblock,
1585                                 new->br_blockcount + RIGHT.br_blockcount,
1586                                 newext)))
1587                                 goto done;
1588                 }
1589                 /* DELTA: The boundary between two in-core extents moved. */
1590                 temp = PREV.br_startoff;
1591                 temp2 = PREV.br_blockcount +
1592                         RIGHT.br_blockcount;
1593                 break;
1594
1595         case BMAP_RIGHT_FILLING:
1596                 /*
1597                  * Setting the last part of a previous oldext extent to newext.
1598                  * The right neighbor is not contiguous.
1599                  */
1600                 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
1601                 xfs_bmbt_set_blockcount(ep,
1602                         PREV.br_blockcount - new->br_blockcount);
1603                 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
1604
1605                 xfs_iext_insert(ip, idx + 1, 1, new, state);
1606                 ip->i_df.if_lastex = idx + 1;
1607                 ip->i_d.di_nextents++;
1608                 if (cur == NULL)
1609                         rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1610                 else {
1611                         rval = XFS_ILOG_CORE;
1612                         if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
1613                                         PREV.br_startblock, PREV.br_blockcount,
1614                                         &i)))
1615                                 goto done;
1616                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1617                         if ((error = xfs_bmbt_update(cur, PREV.br_startoff,
1618                                 PREV.br_startblock,
1619                                 PREV.br_blockcount - new->br_blockcount,
1620                                 oldext)))
1621                                 goto done;
1622                         if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
1623                                         new->br_startblock, new->br_blockcount,
1624                                         &i)))
1625                                 goto done;
1626                         XFS_WANT_CORRUPTED_GOTO(i == 0, done);
1627                         cur->bc_rec.b.br_state = new->br_state;
1628                         if ((error = xfs_btree_insert(cur, &i)))
1629                                 goto done;
1630                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1631                 }
1632                 /* DELTA: One in-core extent is split in two. */
1633                 temp = PREV.br_startoff;
1634                 temp2 = PREV.br_blockcount;
1635                 break;
1636
1637         case 0:
1638                 /*
1639                  * Setting the middle part of a previous oldext extent to
1640                  * newext.  Contiguity is impossible here.
1641                  * One extent becomes three extents.
1642                  */
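                /*
                 * Example (illustrative): converting [40,60) in the middle
                 * of an oldext extent covering [0,100) leaves [0,40) and
                 * [60,100) in the old state and gives [40,60) the new
                 * state, so two extra extent records are inserted and
                 * di_nextents grows by two.
                 */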
1643                 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
1644                 xfs_bmbt_set_blockcount(ep,
1645                         new->br_startoff - PREV.br_startoff);
1646                 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
1647
1648                 r[0] = *new;
1649                 r[1].br_startoff = new_endoff;
1650                 r[1].br_blockcount =
1651                         PREV.br_startoff + PREV.br_blockcount - new_endoff;
1652                 r[1].br_startblock = new->br_startblock + new->br_blockcount;
1653                 r[1].br_state = oldext;
1654                 xfs_iext_insert(ip, idx + 1, 2, &r[0], state);
1655                 ip->i_df.if_lastex = idx + 1;
1656                 ip->i_d.di_nextents += 2;
1657                 if (cur == NULL)
1658                         rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1659                 else {
1660                         rval = XFS_ILOG_CORE;
1661                         if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
1662                                         PREV.br_startblock, PREV.br_blockcount,
1663                                         &i)))
1664                                 goto done;
1665                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1666                         /* new right extent - oldext */
1667                         if ((error = xfs_bmbt_update(cur, r[1].br_startoff,
1668                                 r[1].br_startblock, r[1].br_blockcount,
1669                                 r[1].br_state)))
1670                                 goto done;
1671                         /* new left extent - oldext */
1672                         cur->bc_rec.b = PREV;
1673                         cur->bc_rec.b.br_blockcount =
1674                                 new->br_startoff - PREV.br_startoff;
1675                         if ((error = xfs_btree_insert(cur, &i)))
1676                                 goto done;
1677                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1678                         /*
1679                          * Reset the cursor to the position of the new extent
1680                          * we are about to insert as we can't trust it after
1681                          * the previous insert.
1682                          */
1683                         if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
1684                                         new->br_startblock, new->br_blockcount,
1685                                         &i)))
1686                                 goto done;
1687                         XFS_WANT_CORRUPTED_GOTO(i == 0, done);
1688                         /* new middle extent - newext */
1689                         cur->bc_rec.b.br_state = new->br_state;
1690                         if ((error = xfs_btree_insert(cur, &i)))
1691                                 goto done;
1692                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1693                 }
1694                 /* DELTA: One in-core extent is split in three. */
1695                 temp = PREV.br_startoff;
1696                 temp2 = PREV.br_blockcount;
1697                 break;
1698
1699         case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
1700         case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
1701         case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
1702         case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
1703         case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
1704         case BMAP_LEFT_CONTIG:
1705         case BMAP_RIGHT_CONTIG:
1706                 /*
1707                  * These cases are all impossible.
1708                  */
1709                 ASSERT(0);
1710         }
1711         *curp = cur;
1712         if (delta) {
1713                 temp2 += temp;
1714                 if (delta->xed_startoff > temp)
1715                         delta->xed_startoff = temp;
1716                 if (delta->xed_blockcount < temp2)
1717                         delta->xed_blockcount = temp2;
1718         }
1719 done:
1720         *logflagsp = rval;
1721         return error;
1722 #undef  LEFT
1723 #undef  RIGHT
1724 #undef  PREV
1725 }
1726
1727 /*
1728  * Called by xfs_bmap_add_extent to handle cases converting a hole
1729  * to a delayed allocation.
1730  */
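/*
 * A delayed extent has no real start block; br_startblock instead encodes
 * the worst-case indirect block reservation (see nullstartblock() and
 * startblockval()).  When the new delalloc range merges with a neighbor, a
 * single reservation is recomputed for the combined extent and any excess
 * is returned to the in-core free block counter.
 */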
1731 /*ARGSUSED*/
1732 STATIC int                              /* error */
1733 xfs_bmap_add_extent_hole_delay(
1734         xfs_inode_t             *ip,    /* incore inode pointer */
1735         xfs_extnum_t            idx,    /* extent number to update/insert */
1736         xfs_bmbt_irec_t         *new,   /* new data to add to file extents */
1737         int                     *logflagsp, /* inode logging flags */
1738         xfs_extdelta_t          *delta, /* Change made to incore extents */
1739         int                     rsvd)           /* OK to allocate reserved blocks */
1740 {
1741         xfs_bmbt_rec_host_t     *ep;    /* extent record for idx */
1742         xfs_ifork_t             *ifp;   /* inode fork pointer */
1743         xfs_bmbt_irec_t         left;   /* left neighbor extent entry */
1744         xfs_filblks_t           newlen=0;       /* new indirect size */
1745         xfs_filblks_t           oldlen=0;       /* old indirect size */
1746         xfs_bmbt_irec_t         right;  /* right neighbor extent entry */
1747         int                     state;  /* state bits, accessed thru macros */
1748         xfs_filblks_t           temp=0; /* temp for indirect calculations */
1749         xfs_filblks_t           temp2=0;
1750
1751         ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
1752         ep = xfs_iext_get_ext(ifp, idx);
1753         state = 0;
1754         ASSERT(isnullstartblock(new->br_startblock));
1755
1756         /*
1757          * Check and set flags if this segment has a left neighbor
1758          */
1759         if (idx > 0) {
1760                 state |= BMAP_LEFT_VALID;
1761                 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx - 1), &left);
1762
1763                 if (isnullstartblock(left.br_startblock))
1764                         state |= BMAP_LEFT_DELAY;
1765         }
1766
1767         /*
1768          * Check and set flags if the current (right) segment exists.
1769          * If it doesn't exist, we're converting the hole at end-of-file.
1770          */
1771         if (idx < ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t)) {
1772                 state |= BMAP_RIGHT_VALID;
1773                 xfs_bmbt_get_all(ep, &right);
1774
1775                 if (isnullstartblock(right.br_startblock))
1776                         state |= BMAP_RIGHT_DELAY;
1777         }
1778
1779         /*
1780          * Set contiguity flags on the left and right neighbors.
1781          * Don't let extents get too large, even if the pieces are contiguous.
1782          */
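        /*
         * Only logical adjacency matters here: delayed extents have no
         * physical start block to compare, so two delalloc extents are
         * mergeable whenever their file offsets touch and the combined
         * length stays within MAXEXTLEN.
         */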
1783         if ((state & BMAP_LEFT_VALID) && (state & BMAP_LEFT_DELAY) &&
1784             left.br_startoff + left.br_blockcount == new->br_startoff &&
1785             left.br_blockcount + new->br_blockcount <= MAXEXTLEN)
1786                 state |= BMAP_LEFT_CONTIG;
1787
1788         if ((state & BMAP_RIGHT_VALID) && (state & BMAP_RIGHT_DELAY) &&
1789             new->br_startoff + new->br_blockcount == right.br_startoff &&
1790             new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
1791             (!(state & BMAP_LEFT_CONTIG) ||
1792              (left.br_blockcount + new->br_blockcount +
1793               right.br_blockcount <= MAXEXTLEN)))
1794                 state |= BMAP_RIGHT_CONTIG;
1795
1796         /*
1797          * Switch out based on the contiguity flags.
1798          */
1799         switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
1800         case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
1801                 /*
1802                  * New allocation is contiguous with delayed allocations
1803                  * on the left and on the right.
1804                  * Merge all three into a single extent record.
1805                  */
1806                 temp = left.br_blockcount + new->br_blockcount +
1807                         right.br_blockcount;
1808
1809                 trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_);
1810                 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), temp);
1811                 oldlen = startblockval(left.br_startblock) +
1812                         startblockval(new->br_startblock) +
1813                         startblockval(right.br_startblock);
1814                 newlen = xfs_bmap_worst_indlen(ip, temp);
1815                 xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, idx - 1),
1816                         nullstartblock((int)newlen));
1817                 trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_);
1818
1819                 xfs_iext_remove(ip, idx, 1, state);
1820                 ip->i_df.if_lastex = idx - 1;
1821                 /* DELTA: Two in-core extents were replaced by one. */
1822                 temp2 = temp;
1823                 temp = left.br_startoff;
1824                 break;
1825
1826         case BMAP_LEFT_CONTIG:
1827                 /*
1828                  * New allocation is contiguous with a delayed allocation
1829                  * on the left.
1830                  * Merge the new allocation with the left neighbor.
1831                  */
1832                 temp = left.br_blockcount + new->br_blockcount;
1833                 trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_);
1834                 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), temp);
1835                 oldlen = startblockval(left.br_startblock) +
1836                         startblockval(new->br_startblock);
1837                 newlen = xfs_bmap_worst_indlen(ip, temp);
1838                 xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, idx - 1),
1839                         nullstartblock((int)newlen));
1840                 trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_);
1841
1842                 ip->i_df.if_lastex = idx - 1;
1843                 /* DELTA: One in-core extent grew into a hole. */
1844                 temp2 = temp;
1845                 temp = left.br_startoff;
1846                 break;
1847
1848         case BMAP_RIGHT_CONTIG:
1849                 /*
1850                  * New allocation is contiguous with a delayed allocation
1851                  * on the right.
1852                  * Merge the new allocation with the right neighbor.
1853                  */
1854                 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
1855                 temp = new->br_blockcount + right.br_blockcount;
1856                 oldlen = startblockval(new->br_startblock) +
1857                         startblockval(right.br_startblock);
1858                 newlen = xfs_bmap_worst_indlen(ip, temp);
1859                 xfs_bmbt_set_allf(ep, new->br_startoff,
1860                         nullstartblock((int)newlen), temp, right.br_state);
1861                 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
1862
1863                 ip->i_df.if_lastex = idx;
1864                 /* DELTA: One in-core extent grew into a hole. */
1865                 temp2 = temp;
1866                 temp = new->br_startoff;
1867                 break;
1868
1869         case 0:
1870                 /*
1871                  * New allocation is not contiguous with another
1872                  * delayed allocation.
1873                  * Insert a new entry.
1874                  */
1875                 oldlen = newlen = 0;
1876                 xfs_iext_insert(ip, idx, 1, new, state);
1877                 ip->i_df.if_lastex = idx;
1878                 /* DELTA: A new in-core extent was added in a hole. */
1879                 temp2 = new->br_blockcount;
1880                 temp = new->br_startoff;
1881                 break;
1882         }
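        /*
         * The merged extent needs at most xfs_bmap_worst_indlen() blocks
         * of indirect reservation; hand any surplus from the old
         * reservations back to the free block counter.
         */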
1883         if (oldlen != newlen) {
1884                 ASSERT(oldlen > newlen);
1885                 xfs_mod_incore_sb(ip->i_mount, XFS_SBS_FDBLOCKS,
1886                         (int64_t)(oldlen - newlen), rsvd);
1887                 /*
1888                  * Nothing to do for disk quota accounting here.
1889                  */
1890         }
1891         if (delta) {
1892                 temp2 += temp;
1893                 if (delta->xed_startoff > temp)
1894                         delta->xed_startoff = temp;
1895                 if (delta->xed_blockcount < temp2)
1896                         delta->xed_blockcount = temp2;
1897         }
1898         *logflagsp = 0;
1899         return 0;
1900 }
1901
1902 /*
1903  * Called by xfs_bmap_add_extent to handle cases converting a hole
1904  * to a real allocation.
1905  */
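/*
 * Unlike the delalloc variant above, this path can operate on either the
 * data or the attribute fork and, when a bmap btree exists, keeps the
 * on-disk btree in sync with the in-core extent list through "cur".
 */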
1906 STATIC int                              /* error */
1907 xfs_bmap_add_extent_hole_real(
1908         xfs_inode_t             *ip,    /* incore inode pointer */
1909         xfs_extnum_t            idx,    /* extent number to update/insert */
1910         xfs_btree_cur_t         *cur,   /* if null, not a btree */
1911         xfs_bmbt_irec_t         *new,   /* new data to add to file extents */
1912         int                     *logflagsp, /* inode logging flags */
1913         xfs_extdelta_t          *delta, /* Change made to incore extents */
1914         int                     whichfork) /* data or attr fork */
1915 {
1916         xfs_bmbt_rec_host_t     *ep;    /* pointer to extent entry ins. point */
1917         int                     error;  /* error return value */
1918         int                     i;      /* temp state */
1919         xfs_ifork_t             *ifp;   /* inode fork pointer */
1920         xfs_bmbt_irec_t         left;   /* left neighbor extent entry */
1921         xfs_bmbt_irec_t         right;  /* right neighbor extent entry */
1922         int                     rval=0; /* return value (logging flags) */
1923         int                     state;  /* state bits, accessed thru macros */
1924         xfs_filblks_t           temp=0;
1925         xfs_filblks_t           temp2=0;
1926
1927         ifp = XFS_IFORK_PTR(ip, whichfork);
1928         ASSERT(idx <= ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t));
1929         ep = xfs_iext_get_ext(ifp, idx);
1930         state = 0;
1931
1932         if (whichfork == XFS_ATTR_FORK)
1933                 state |= BMAP_ATTRFORK;
1934
1935         /*
1936          * Check and set flags if this segment has a left neighbor.
1937          */
1938         if (idx > 0) {
1939                 state |= BMAP_LEFT_VALID;
1940                 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx - 1), &left);
1941                 if (isnullstartblock(left.br_startblock))
1942                         state |= BMAP_LEFT_DELAY;
1943         }
1944
1945         /*
1946          * Check and set flags if this segment has a current value.
1947          * Not true if we're inserting into the "hole" at eof.
1948          */
1949         if (idx < ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)) {
1950                 state |= BMAP_RIGHT_VALID;
1951                 xfs_bmbt_get_all(ep, &right);
1952                 if (isnullstartblock(right.br_startblock))
1953                         state |= BMAP_RIGHT_DELAY;
1954         }
1955
1956         /*
1957          * We're inserting a real allocation between "left" and "right".
1958          * Set the contiguity flags.  Don't let extents get too large.
1959          */
1960         if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
1961             left.br_startoff + left.br_blockcount == new->br_startoff &&
1962             left.br_startblock + left.br_blockcount == new->br_startblock &&
1963             left.br_state == new->br_state &&
1964             left.br_blockcount + new->br_blockcount <= MAXEXTLEN)
1965                 state |= BMAP_LEFT_CONTIG;
1966
1967         if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
1968             new->br_startoff + new->br_blockcount == right.br_startoff &&
1969             new->br_startblock + new->br_blockcount == right.br_startblock &&
1970             new->br_state == right.br_state &&
1971             new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
1972             (!(state & BMAP_LEFT_CONTIG) ||
1973              left.br_blockcount + new->br_blockcount +
1974              right.br_blockcount <= MAXEXTLEN))
1975                 state |= BMAP_RIGHT_CONTIG;
1976
1977         error = 0;
1978         /*
1979          * Select which case we're in here, and implement it.
1980          */
1981         switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
1982         case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
1983                 /*
1984                  * New allocation is contiguous with real allocations on the
1985                  * left and on the right.
1986                  * Merge all three into a single extent record.
1987                  */
1988                 trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_);
1989                 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
1990                         left.br_blockcount + new->br_blockcount +
1991                         right.br_blockcount);
1992                 trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_);
1993
1994                 xfs_iext_remove(ip, idx, 1, state);
1995                 ifp->if_lastex = idx - 1;
1996                 XFS_IFORK_NEXT_SET(ip, whichfork,
1997                         XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
1998                 if (cur == NULL) {
1999                         rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
2000                 } else {
2001                         rval = XFS_ILOG_CORE;
2002                         if ((error = xfs_bmbt_lookup_eq(cur,
2003                                         right.br_startoff,
2004                                         right.br_startblock,
2005                                         right.br_blockcount, &i)))
2006                                 goto done;
2007                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
2008                         if ((error = xfs_btree_delete(cur, &i)))
2009                                 goto done;
2010                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
2011                         if ((error = xfs_btree_decrement(cur, 0, &i)))
2012                                 goto done;
2013                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
2014                         if ((error = xfs_bmbt_update(cur, left.br_startoff,
2015                                         left.br_startblock,
2016                                         left.br_blockcount +
2017                                                 new->br_blockcount +
2018                                                 right.br_blockcount,
2019                                         left.br_state)))
2020                                 goto done;
2021                 }
2022                 /* DELTA: Two in-core extents were replaced by one. */
2023                 temp = left.br_startoff;
2024                 temp2 = left.br_blockcount +
2025                         new->br_blockcount +
2026                         right.br_blockcount;
2027                 break;
2028
2029         case BMAP_LEFT_CONTIG:
2030                 /*
2031                  * New allocation is contiguous with a real allocation
2032                  * on the left.
2033                  * Merge the new allocation with the left neighbor.
2034                  */
2035                 trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_);
2036                 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
2037                         left.br_blockcount + new->br_blockcount);
2038                 trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_);
2039
2040                 ifp->if_lastex = idx - 1;
2041                 if (cur == NULL) {
2042                         rval = xfs_ilog_fext(whichfork);
2043                 } else {
2044                         rval = 0;
2045                         if ((error = xfs_bmbt_lookup_eq(cur,
2046                                         left.br_startoff,
2047                                         left.br_startblock,
2048                                         left.br_blockcount, &i)))
2049                                 goto done;
2050                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
2051                         if ((error = xfs_bmbt_update(cur, left.br_startoff,
2052                                         left.br_startblock,
2053                                         left.br_blockcount +
2054                                                 new->br_blockcount,
2055                                         left.br_state)))
2056                                 goto done;
2057                 }
2058                 /* DELTA: One in-core extent grew. */
2059                 temp = left.br_startoff;
2060                 temp2 = left.br_blockcount +
2061                         new->br_blockcount;
2062                 break;
2063
2064         case BMAP_RIGHT_CONTIG:
2065                 /*
2066                  * New allocation is contiguous with a real allocation
2067                  * on the right.
2068                  * Merge the new allocation with the right neighbor.
2069                  */
2070                 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
2071                 xfs_bmbt_set_allf(ep, new->br_startoff, new->br_startblock,
2072                         new->br_blockcount + right.br_blockcount,
2073                         right.br_state);
2074                 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
2075
2076                 ifp->if_lastex = idx;
2077                 if (cur == NULL) {
2078                         rval = xfs_ilog_fext(whichfork);
2079                 } else {
2080                         rval = 0;
2081                         if ((error = xfs_bmbt_lookup_eq(cur,
2082                                         right.br_startoff,
2083                                         right.br_startblock,
2084                                         right.br_blockcount, &i)))
2085                                 goto done;
2086                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
2087                         if ((error = xfs_bmbt_update(cur, new->br_startoff,
2088                                         new->br_startblock,
2089                                         new->br_blockcount +
2090                                                 right.br_blockcount,
2091                                         right.br_state)))
2092                                 goto done;
2093                 }
2094                 /* DELTA: One in-core extent grew. */
2095                 temp = new->br_startoff;
2096                 temp2 = new->br_blockcount +
2097                         right.br_blockcount;
2098                 break;
2099
2100         case 0:
2101                 /*
2102                  * New allocation is not contiguous with another
2103                  * real allocation.
2104                  * Insert a new entry.
2105                  */
2106                 xfs_iext_insert(ip, idx, 1, new, state);
2107                 ifp->if_lastex = idx;
2108                 XFS_IFORK_NEXT_SET(ip, whichfork,
2109                         XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
2110                 if (cur == NULL) {
2111                         rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
2112                 } else {
2113                         rval = XFS_ILOG_CORE;
2114                         if ((error = xfs_bmbt_lookup_eq(cur,
2115                                         new->br_startoff,
2116                                         new->br_startblock,
2117                                         new->br_blockcount, &i)))
2118                                 goto done;
2119                         XFS_WANT_CORRUPTED_GOTO(i == 0, done);
2120                         cur->bc_rec.b.br_state = new->br_state;
2121                         if ((error = xfs_btree_insert(cur, &i)))
2122                                 goto done;
2123                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
2124                 }
2125                 /* DELTA: A new extent was added in a hole. */
2126                 temp = new->br_startoff;
2127                 temp2 = new->br_blockcount;
2128                 break;
2129         }
2130         if (delta) {
2131                 temp2 += temp;
2132                 if (delta->xed_startoff > temp)
2133                         delta->xed_startoff = temp;
2134                 if (delta->xed_blockcount < temp2)
2135                         delta->xed_blockcount = temp2;
2136         }
2137 done:
2138         *logflagsp = rval;
2139         return error;
2140 }
2141
2142 /*
2143  * Adjust the size of the new extent based on di_extsize and rt extsize.
2144  */
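/*
 * Worked example (illustrative numbers): with an extent size hint of 16
 * blocks, a request for offset 21, length 10 is first widened to offset 16,
 * length 16 so that both ends sit on extsz boundaries, and is then shifted
 * or trimmed so that it does not collide with the neighboring extents
 * described by prevp and gotp.
 */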
2145 STATIC int
2146 xfs_bmap_extsize_align(
2147         xfs_mount_t     *mp,
2148         xfs_bmbt_irec_t *gotp,          /* next extent pointer */
2149         xfs_bmbt_irec_t *prevp,         /* previous extent pointer */
2150         xfs_extlen_t    extsz,          /* align to this extent size */
2151         int             rt,             /* is this a realtime inode? */
2152         int             eof,            /* is extent at end-of-file? */
2153         int             delay,          /* creating delalloc extent? */
2154         int             convert,        /* overwriting unwritten extent? */
2155         xfs_fileoff_t   *offp,          /* in/out: aligned offset */
2156         xfs_extlen_t    *lenp)          /* in/out: aligned length */
2157 {
2158         xfs_fileoff_t   orig_off;       /* original offset */
2159         xfs_extlen_t    orig_alen;      /* original length */
2160         xfs_fileoff_t   orig_end;       /* original off+len */
2161         xfs_fileoff_t   nexto;          /* next file offset */
2162         xfs_fileoff_t   prevo;          /* previous file offset */
2163         xfs_fileoff_t   align_off;      /* temp for offset */
2164         xfs_extlen_t    align_alen;     /* temp for length */
2165         xfs_extlen_t    temp;           /* temp for calculations */
2166
2167         if (convert)
2168                 return 0;
2169
2170         orig_off = align_off = *offp;
2171         orig_alen = align_alen = *lenp;
2172         orig_end = orig_off + orig_alen;
2173
2174         /*
2175          * If this request overlaps an existing extent, then don't
2176          * attempt to perform any additional alignment.
2177          */
2178         if (!delay && !eof &&
2179             (orig_off >= gotp->br_startoff) &&
2180             (orig_end <= gotp->br_startoff + gotp->br_blockcount)) {
2181                 return 0;
2182         }
2183
2184         /*
2185          * If the file offset is unaligned vs. the extent size
2186          * we need to align it.  This will be possible unless
2187          * the file was previously written with a kernel that didn't
2188          * perform this alignment, or if a truncate shot us in the
2189          * foot.
2190          */
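        /*
         * Round the start of the request down to an extsz boundary and
         * grow the length by the same amount so the original range stays
         * covered.
         */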
2191         temp = do_mod(orig_off, extsz);
2192         if (temp) {
2193                 align_alen += temp;
2194                 align_off -= temp;
2195         }
2196         /*
2197          * Same adjustment for the end of the requested area.
2198          */
2199         if ((temp = (align_alen % extsz))) {
2200                 align_alen += extsz - temp;
2201         }
2202         /*
2203          * If the previous block overlaps with this proposed allocation
2204          * then move the start forward without adjusting the length.
2205          */
2206         if (prevp->br_startoff != NULLFILEOFF) {
2207                 if (prevp->br_startblock == HOLESTARTBLOCK)
2208                         prevo = prevp->br_startoff;
2209                 else
2210                         prevo = prevp->br_startoff + prevp->br_blockcount;
2211         } else
2212                 prevo = 0;
2213         if (align_off != orig_off && align_off < prevo)
2214                 align_off = prevo;
2215         /*
2216          * If the next block overlaps with this proposed allocation
2217          * then move the start back without adjusting the length,
2218          * but not before offset 0.
2219          * This may of course make the start overlap the previous block,
2220          * and if we hit the offset 0 limit then the next block
2221          * can still overlap too.
2222          */
2223         if (!eof && gotp->br_startoff != NULLFILEOFF) {
2224                 if ((delay && gotp->br_startblock == HOLESTARTBLOCK) ||
2225                     (!delay && gotp->br_startblock == DELAYSTARTBLOCK))
2226                         nexto = gotp->br_startoff + gotp->br_blockcount;
2227                 else
2228                         nexto = gotp->br_startoff;
2229         } else
2230                 nexto = NULLFILEOFF;
2231         if (!eof &&
2232             align_off + align_alen != orig_end &&
2233             align_off + align_alen > nexto)
2234                 align_off = nexto > align_alen ? nexto - align_alen : 0;
2235         /*
2236          * If we're now overlapping the next or previous extent that
2237          * means we can't fit an extsz piece in this hole.  Just move
2238          * the start forward to the first valid spot and set
2239          * the length so we hit the end.
2240          */
2241         if (align_off != orig_off && align_off < prevo)
2242                 align_off = prevo;
2243         if (align_off + align_alen != orig_end &&
2244             align_off + align_alen > nexto &&
2245             nexto != NULLFILEOFF) {
2246                 ASSERT(nexto > prevo);
2247                 align_alen = nexto - align_off;
2248         }
2249
2250         /*
2251          * If realtime, and the result isn't a multiple of the realtime
2252          * extent size we need to remove blocks until it is.
2253          */
2254         if (rt && (temp = (align_alen % mp->m_sb.sb_rextsize))) {
2255                 /*
2256                  * We're not covering the original request, or
2257                  * we won't be able to once we fix the length.
2258                  */
2259                 if (orig_off < align_off ||
2260                     orig_end > align_off + align_alen ||
2261                     align_alen - temp < orig_alen)
2262                         return XFS_ERROR(EINVAL);
2263                 /*
2264                  * Try to fix it by moving the start up.
2265                  */
2266                 if (align_off + temp <= orig_off) {
2267                         align_alen -= temp;
2268                         align_off += temp;
2269                 }
2270                 /*
2271                  * Try to fix it by moving the end in.
2272                  */
2273                 else if (align_off + align_alen - temp >= orig_end)
2274                         align_alen -= temp;
2275                 /*
2276                  * Set the start to the minimum then trim the length.
2277                  */
2278                 else {
2279                         align_alen -= orig_off - align_off;
2280                         align_off = orig_off;
2281                         align_alen -= align_alen % mp->m_sb.sb_rextsize;
2282                 }
2283                 /*
2284                  * Result doesn't cover the request, fail it.
2285                  */
2286                 if (orig_off < align_off || orig_end > align_off + align_alen)
2287                         return XFS_ERROR(EINVAL);
2288         } else {
2289                 ASSERT(orig_off >= align_off);
2290                 ASSERT(orig_end <= align_off + align_alen);
2291         }
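             /*
              * Sketch of the realtime fixup above, with made-up numbers:
              * for sb_rextsize = 16 and align_alen = 40, temp = 8.  If
              * there are at least 8 surplus blocks in front of the
              * original request the start moves up by 8; otherwise, if
              * the surplus sits past the end, the length is trimmed by 8;
              * failing both, the start is pinned to orig_off and the
              * length rounded down.  Either way the result is a multiple
              * of 16 blocks that still covers the caller's range, or we
              * give up with EINVAL.
              */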
2292
2293 #ifdef DEBUG
2294         if (!eof && gotp->br_startoff != NULLFILEOFF)
2295                 ASSERT(align_off + align_alen <= gotp->br_startoff);
2296         if (prevp->br_startoff != NULLFILEOFF)
2297                 ASSERT(align_off >= prevp->br_startoff + prevp->br_blockcount);
2298 #endif
2299
2300         *lenp = align_alen;
2301         *offp = align_off;
2302         return 0;
2303 }
2304
2305 #define XFS_ALLOC_GAP_UNITS     4
2306
2307 STATIC void
2308 xfs_bmap_adjacent(
2309         xfs_bmalloca_t  *ap)            /* bmap alloc argument struct */
2310 {
2311         xfs_fsblock_t   adjust;         /* adjustment to block numbers */
2312         xfs_agnumber_t  fb_agno;        /* ag number of ap->firstblock */
2313         xfs_mount_t     *mp;            /* mount point structure */
2314         int             nullfb;         /* true if ap->firstblock isn't set */
2315         int             rt;             /* true if inode is realtime */
2316
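     /*
      * ISVALID(x, y) sanity-checks a candidate block number x against a
      * known-good block y: on the realtime device x only has to lie
      * within the rt area (y is ignored), while on the data device it
      * must sit in the same, valid AG as y and within that AG's block
      * count.
      */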
2317 #define ISVALID(x,y)    \
2318         (rt ? \
2319                 (x) < mp->m_sb.sb_rblocks : \
2320                 XFS_FSB_TO_AGNO(mp, x) == XFS_FSB_TO_AGNO(mp, y) && \
2321                 XFS_FSB_TO_AGNO(mp, x) < mp->m_sb.sb_agcount && \
2322                 XFS_FSB_TO_AGBNO(mp, x) < mp->m_sb.sb_agblocks)
2323
2324         mp = ap->ip->i_mount;
2325         nullfb = ap->firstblock == NULLFSBLOCK;
2326         rt = XFS_IS_REALTIME_INODE(ap->ip) && ap->userdata;
2327         fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, ap->firstblock);
2328         /*
2329          * If allocating at eof, and there's a previous real block,
2330          * try to use its last block as our starting point.
2331          */
2332         if (ap->eof && ap->prevp->br_startoff != NULLFILEOFF &&
2333             !isnullstartblock(ap->prevp->br_startblock) &&
2334             ISVALID(ap->prevp->br_startblock + ap->prevp->br_blockcount,
2335                     ap->prevp->br_startblock)) {
2336                 ap->rval = ap->prevp->br_startblock + ap->prevp->br_blockcount;
2337                 /*
2338                  * Adjust for the gap between prevp and us.
2339                  */
2340                 adjust = ap->off -
2341                         (ap->prevp->br_startoff + ap->prevp->br_blockcount);
2342                 if (adjust &&
2343                     ISVALID(ap->rval + adjust, ap->prevp->br_startblock))
2344                         ap->rval += adjust;
2345         }
2346         /*
2347          * If not at eof, then compare the two neighbor blocks.
2348          * Figure out whether either one gives us a good starting point,
2349          * and pick the better one.
2350          */
2351         else if (!ap->eof) {
2352                 xfs_fsblock_t   gotbno;         /* right side block number */
2353                 xfs_fsblock_t   gotdiff=0;      /* right side difference */
2354                 xfs_fsblock_t   prevbno;        /* left side block number */
2355                 xfs_fsblock_t   prevdiff=0;     /* left side difference */
2356
2357                 /*
2358                  * If there's a previous (left) block, select a requested
2359                  * start block based on it.
2360                  */
2361                 if (ap->prevp->br_startoff != NULLFILEOFF &&
2362                     !isnullstartblock(ap->prevp->br_startblock) &&
2363                     (prevbno = ap->prevp->br_startblock +
2364                                ap->prevp->br_blockcount) &&
2365                     ISVALID(prevbno, ap->prevp->br_startblock)) {
2366                         /*
2367                          * Calculate gap to end of previous block.
2368                          */
2369                         adjust = prevdiff = ap->off -
2370                                 (ap->prevp->br_startoff +
2371                                  ap->prevp->br_blockcount);
2372                         /*
2373                          * Figure the startblock based on the previous block's
2374                          * end and the gap size.
2375                          * Heuristic!
2376                          * If the gap is large relative to the piece we're
2377                          * allocating, or using it gives us an invalid block
2378                          * number, then just use the end of the previous block.
2379                          */
2380                         if (prevdiff <= XFS_ALLOC_GAP_UNITS * ap->alen &&
2381                             ISVALID(prevbno + prevdiff,
2382                                     ap->prevp->br_startblock))
2383                                 prevbno += adjust;
2384                         else
2385                                 prevdiff += adjust;
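                             /*
                              * Worked example (made-up numbers): if the
                              * previous extent maps file offsets
                              * [100, 110) to blocks [500, 510) and we are
                              * allocating 8 blocks at offset 114, the gap
                              * is 4.  That is within
                              * XFS_ALLOC_GAP_UNITS * alen = 32, so we aim
                              * at block 514 - exactly where this data
                              * would sit if the hole were ever filled in.
                              * A much larger gap leaves the target at
                              * block 510 and doubles prevdiff as a
                              * penalty when it is compared against the
                              * right neighbour below.
                              */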
2386                         /*
2387                          * If the firstblock forbids it, can't use it,
2388                          * must use default.
2389                          */
2390                         if (!rt && !nullfb &&
2391                             XFS_FSB_TO_AGNO(mp, prevbno) != fb_agno)
2392                                 prevbno = NULLFSBLOCK;
2393                 }
2394                 /*
2395                  * No previous block or can't follow it, just default.
2396                  */
2397                 else
2398                         prevbno = NULLFSBLOCK;
2399                 /*
2400                  * If there's a following (right) block, select a requested
2401                  * start block based on it.
2402                  */
2403                 if (!isnullstartblock(ap->gotp->br_startblock)) {
2404                         /*
2405                          * Calculate gap to start of next block.
2406                          */
2407                         adjust = gotdiff = ap->gotp->br_startoff - ap->off;
2408                         /*
2409                          * Figure the startblock based on the next block's
2410                          * start and the gap size.
2411                          */
2412                         gotbno = ap->gotp->br_startblock;
2413                         /*
2414                          * Heuristic!
2415                          * If the gap is large relative to the piece we're
2416                          * allocating, or using it gives us an invalid block
2417                          * number, then just use the start of the next block
2418                          * offset by our length.
2419                          */
2420                         if (gotdiff <= XFS_ALLOC_GAP_UNITS * ap->alen &&
2421                             ISVALID(gotbno - gotdiff, gotbno))
2422                                 gotbno -= adjust;
2423                         else if (ISVALID(gotbno - ap->alen, gotbno)) {
2424                                 gotbno -= ap->alen;
2425                                 gotdiff += adjust - ap->alen;
2426                         } else
2427                                 gotdiff += adjust;
2428                         /*
2429                          * If the firstblock forbids it, can't use it,
2430                          * must use default.
2431                          */
2432                         if (!rt && !nullfb &&
2433                             XFS_FSB_TO_AGNO(mp, gotbno) != fb_agno)
2434                                 gotbno = NULLFSBLOCK;
2435                 }
2436                 /*
2437                  * No next block, just default.
2438                  */
2439                 else
2440                         gotbno = NULLFSBLOCK;
2441                 /*
2442                  * If both valid, pick the better one, else the only good
2443                  * one, else ap->rval is already set (to 0 or the inode block).
2444                  */
2445                 if (prevbno != NULLFSBLOCK && gotbno != NULLFSBLOCK)
2446                         ap->rval = prevdiff <= gotdiff ? prevbno : gotbno;
2447                 else if (prevbno != NULLFSBLOCK)
2448                         ap->rval = prevbno;
2449                 else if (gotbno != NULLFSBLOCK)
2450                         ap->rval = gotbno;
2451         }
2452 #undef ISVALID
2453 }
2454
2455 STATIC int
2456 xfs_bmap_rtalloc(
2457         xfs_bmalloca_t  *ap)            /* bmap alloc argument struct */
2458 {
2459         xfs_alloctype_t atype = 0;      /* type for allocation routines */
2460         int             error;          /* error return value */
2461         xfs_mount_t     *mp;            /* mount point structure */
2462         xfs_extlen_t    prod = 0;       /* product factor for allocators */
2463         xfs_extlen_t    ralen = 0;      /* realtime allocation length */
2464         xfs_extlen_t    align;          /* minimum allocation alignment */
2465         xfs_rtblock_t   rtb;
2466
2467         mp = ap->ip->i_mount;
2468         align = xfs_get_extsz_hint(ap->ip);
2469         prod = align / mp->m_sb.sb_rextsize;
2470         error = xfs_bmap_extsize_align(mp, ap->gotp, ap->prevp,
2471                                         align, 1, ap->eof, 0,
2472                                         ap->conv, &ap->off, &ap->alen);
2473         if (error)
2474                 return error;
2475         ASSERT(ap->alen);
2476         ASSERT(ap->alen % mp->m_sb.sb_rextsize == 0);
2477
2478         /*
2479          * If the offset & length are not perfectly aligned
2480          * then kill prod, it will just get us in trouble.
2481          */
2482         if (do_mod(ap->off, align) || ap->alen % align)
2483                 prod = 1;
2484         /*
2485          * Set ralen to be the actual requested length in rtextents.
2486          */
2487         ralen = ap->alen / mp->m_sb.sb_rextsize;
2488         /*
2489          * If the old value was close enough to MAXEXTLEN that
2490          * we rounded up to it, cut it back so it's valid again.
2491          * Note that if it's a really large request (bigger than
2492          * MAXEXTLEN), we don't hear about that number, and can't
2493          * adjust the starting point to match it.
2494          */
2495         if (ralen * mp->m_sb.sb_rextsize >= MAXEXTLEN)
2496                 ralen = MAXEXTLEN / mp->m_sb.sb_rextsize;
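             /*
              * Units sketch (illustrative numbers): with sb_rextsize = 16
              * blocks and an extent size hint of 64 blocks, prod = 4 rt
              * extents, and a 128-block request becomes ralen = 8 rt
              * extents.  The clamp above only matters for huge requests:
              * if ralen converted back to blocks would reach MAXEXTLEN,
              * it is cut back to MAXEXTLEN / sb_rextsize rt extents.
              */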
2497         /*
2498          * If it's an allocation to an empty file at offset 0,
2499          * pick an extent that will space things out in the rt area.
2500          */
2501         if (ap->eof && ap->off == 0) {
2502                 xfs_rtblock_t uninitialized_var(rtx); /* realtime extent no */
2503
2504                 error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx);
2505                 if (error)
2506                         return error;
2507                 ap->rval = rtx * mp->m_sb.sb_rextsize;
2508         } else {
2509                 ap->rval = 0;
2510         }
2511
2512         xfs_bmap_adjacent(ap);
2513
2514         /*
2515          * Realtime allocation, done through xfs_rtallocate_extent.
2516          */
2517         atype = ap->rval == 0 ?  XFS_ALLOCTYPE_ANY_AG : XFS_ALLOCTYPE_NEAR_BNO;
2518         do_div(ap->rval, mp->m_sb.sb_rextsize);
2519         rtb = ap->rval;
2520         ap->alen = ralen;
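             /*
              * Note on units: xfs_rtallocate_extent works in rt extents,
              * so ap->rval has just been converted from filesystem blocks
              * to rt extents and ap->alen set to ralen rt extents.  On
              * success both are scaled back up to blocks below.
              */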
2521         if ((error = xfs_rtallocate_extent(ap->tp, ap->rval, 1, ap->alen,
2522                                 &ralen, atype, ap->wasdel, prod, &rtb)))
2523                 return error;
2524         if (rtb == NULLFSBLOCK && prod > 1 &&
2525             (error = xfs_rtallocate_extent(ap->tp, ap->rval, 1,
2526                                            ap->alen, &ralen, atype,
2527                                            ap->wasdel, 1, &rtb)))
2528                 return error;
2529         ap->rval = rtb;
2530         if (ap->rval != NULLFSBLOCK) {
2531                 ap->rval *= mp->m_sb.sb_rextsize;
2532                 ralen *= mp->m_sb.sb_rextsize;
2533                 ap->alen = ralen;
2534                 ap->ip->i_d.di_nblocks += ralen;
2535                 xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
2536                 if (ap->wasdel)
2537                         ap->ip->i_delayed_blks -= ralen;
2538                 /*
2539                  * Adjust the disk quota also. This was reserved
2540                  * earlier.
2541                  */
2542                 xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
2543                         ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT :
2544                                         XFS_TRANS_DQ_RTBCOUNT, (long) ralen);
2545         } else {
2546                 ap->alen = 0;
2547         }
2548         return 0;
2549 }
2550
2551 STATIC int
2552 xfs_bmap_btalloc_nullfb(
2553         struct xfs_bmalloca     *ap,
2554         struct xfs_alloc_arg    *args,
2555         xfs_extlen_t            *blen)
2556 {
2557         struct xfs_mount        *mp = ap->ip->i_mount;
2558         struct xfs_perag        *pag;
2559         xfs_agnumber_t          ag, startag;
2560         int                     notinit = 0;
2561         int                     error;
2562
2563         if (ap->userdata && xfs_inode_is_filestream(ap->ip))
2564                 args->type = XFS_ALLOCTYPE_NEAR_BNO;
2565         else
2566                 args->type = XFS_ALLOCTYPE_START_BNO;
2567         args->total = ap->total;
2568
2569         /*
2570          * Search for an allocation group with a single extent large enough
2571          * for the request.  If one isn't found, then adjust the minimum
2572          * allocation size to the largest space found.
2573          */
2574         startag = ag = XFS_FSB_TO_AGNO(mp, args->fsbno);
2575         if (startag == NULLAGNUMBER)
2576                 startag = ag = 0;
2577
2578         pag = xfs_perag_get(mp, ag);
2579         while (*blen < ap->alen) {
2580                 if (!pag->pagf_init) {
2581                         error = xfs_alloc_pagf_init(mp, args->tp, ag,
2582                                                     XFS_ALLOC_FLAG_TRYLOCK);
2583                         if (error) {
2584                                 xfs_perag_put(pag);
2585                                 return error;
2586                         }
2587                 }
2588
2589                 /*
2590                  * See xfs_alloc_fix_freelist...
2591                  */
2592                 if (pag->pagf_init) {
2593                         xfs_extlen_t    longest;
2594                         longest = xfs_alloc_longest_free_extent(mp, pag);
2595                         if (*blen < longest)
2596                                 *blen = longest;
2597                 } else
2598                         notinit = 1;
2599
2600                 if (xfs_inode_is_filestream(ap->ip)) {
2601                         if (*blen >= ap->alen)
2602                                 break;
2603
2604                         if (ap->userdata) {
2605                                 /*
2606                                  * If startag is an invalid AG, we've
2607                                  * come here once before and
2608                                  * xfs_filestream_new_ag picked the
2609                                  * best currently available.
2610                                  *
2611                                  * Don't continue looping, since we
2612                                  * could loop forever.
2613                                  */
2614                                 if (startag == NULLAGNUMBER)
2615                                         break;
2616
2617                                 error = xfs_filestream_new_ag(ap, &ag);
2618                                 xfs_perag_put(pag);
2619                                 if (error)
2620                                         return error;
2621
2622                                 /* loop again to set 'blen' */
2623                                 startag = NULLAGNUMBER;
2624                                 pag = xfs_perag_get(mp, ag);
2625                                 continue;
2626                         }
2627                 }
2628                 if (++ag == mp->m_sb.sb_agcount)
2629                         ag = 0;
2630                 if (ag == startag)
2631                         break;
2632                 xfs_perag_put(pag);
2633                 pag = xfs_perag_get(mp, ag);
2634         }
2635         xfs_perag_put(pag);
2636
2637         /*
2638          * Since the above loop did a BUF_TRYLOCK, it is
2639          * possible that there is space for this request.
2640          */
2641         if (notinit || *blen < ap->minlen)
2642                 args->minlen = ap->minlen;
2643         /*
2644          * If the best seen length is less than the request
2645          * length, use the best as the minimum.
2646          */
2647         else if (*blen < ap->alen)
2648                 args->minlen = *blen;
2649         /*
2650          * Otherwise we've seen an extent as big as alen,
2651          * use that as the minimum.
2652          */
2653         else
2654                 args->minlen = ap->alen;
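             /*
              * Example of the minlen selection above: for a 100-block
              * request with a caller minimum of 1, finding an AG whose
              * largest free extent is 60 blocks gives minlen = 60;
              * finding one with a 100+ block free extent gives
              * minlen = 100; and if some AG headers could not be read
              * (notinit), or the best extent seen is below the caller's
              * minimum, we fall back to ap->minlen.
              */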
2655
2656         /*
2657          * set the failure fallback case to look in the selected
2658          * AG as the stream may have moved.
2659          */
2660         if (xfs_inode_is_filestream(ap->ip))
2661                 ap->rval = args->fsbno = XFS_AGB_TO_FSB(mp, ag, 0);
2662
2663         return 0;
2664 }
2665
2666 STATIC int
2667 xfs_bmap_btalloc(
2668         xfs_bmalloca_t  *ap)            /* bmap alloc argument struct */
2669 {
2670         xfs_mount_t     *mp;            /* mount point structure */
2671         xfs_alloctype_t atype = 0;      /* type for allocation routines */
2672         xfs_extlen_t    align;          /* minimum allocation alignment */
2673         xfs_agnumber_t  fb_agno;        /* ag number of ap->firstblock */
2674         xfs_agnumber_t  ag;
2675         xfs_alloc_arg_t args;
2676         xfs_extlen_t    blen;
2677         xfs_extlen_t    nextminlen = 0;
2678         int             nullfb;         /* true if ap->firstblock isn't set */
2679         int             isaligned;
2680         int             tryagain;
2681         int             error;
2682
2683         mp = ap->ip->i_mount;
2684         align = ap->userdata ? xfs_get_extsz_hint(ap->ip) : 0;
2685         if (unlikely(align)) {
2686                 error = xfs_bmap_extsize_align(mp, ap->gotp, ap->prevp,
2687                                                 align, 0, ap->eof, 0, ap->conv,
2688                                                 &ap->off, &ap->alen);
2689                 ASSERT(!error);
2690                 ASSERT(ap->alen);
2691         }
2692         nullfb = ap->firstblock == NULLFSBLOCK;
2693         fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, ap->firstblock);
2694         if (nullfb) {
2695                 if (ap->userdata && xfs_inode_is_filestream(ap->ip)) {
2696                         ag = xfs_filestream_lookup_ag(ap->ip);
2697                         ag = (ag != NULLAGNUMBER) ? ag : 0;
2698                         ap->rval = XFS_AGB_TO_FSB(mp, ag, 0);
2699                 } else {
2700                         ap->rval = XFS_INO_TO_FSB(mp, ap->ip->i_ino);
2701                 }
2702         } else
2703                 ap->rval = ap->firstblock;
2704
2705         xfs_bmap_adjacent(ap);
2706
2707         /*
2708          * If allowed, use ap->rval; otherwise must use firstblock since
2709          * it's in the right allocation group.
2710          */
2711         if (nullfb || XFS_FSB_TO_AGNO(mp, ap->rval) == fb_agno)
2712                 ;
2713         else
2714                 ap->rval = ap->firstblock;
2715         /*
2716          * Normal allocation, done through xfs_alloc_vextent.
2717          */
2718         tryagain = isaligned = 0;
2719         args.tp = ap->tp;
2720         args.mp = mp;
2721         args.fsbno = ap->rval;
2722         args.maxlen = MIN(ap->alen, mp->m_sb.sb_agblocks);
2723         args.firstblock = ap->firstblock;
2724         blen = 0;
2725         if (nullfb) {
2726                 error = xfs_bmap_btalloc_nullfb(ap, &args, &blen);
2727                 if (error)
2728                         return error;
2729         } else if (ap->low) {
2730                 if (xfs_inode_is_filestream(ap->ip))
2731                         args.type = XFS_ALLOCTYPE_FIRST_AG;
2732                 else
2733                         args.type = XFS_ALLOCTYPE_START_BNO;
2734                 args.total = args.minlen = ap->minlen;
2735         } else {
2736                 args.type = XFS_ALLOCTYPE_NEAR_BNO;
2737                 args.total = ap->total;
2738                 args.minlen = ap->minlen;
2739         }
2740         /* apply extent size hints if obtained earlier */
2741         if (unlikely(align)) {
2742                 args.prod = align;
2743                 if ((args.mod = (xfs_extlen_t)do_mod(ap->off, args.prod)))
2744                         args.mod = (xfs_extlen_t)(args.prod - args.mod);
2745         } else if (mp->m_sb.sb_blocksize >= PAGE_CACHE_SIZE) {
2746                 args.prod = 1;
2747                 args.mod = 0;
2748         } else {
2749                 args.prod = PAGE_CACHE_SIZE >> mp->m_sb.sb_blocklog;
2750                 if ((args.mod = (xfs_extlen_t)(do_mod(ap->off, args.prod))))
2751                         args.mod = (xfs_extlen_t)(args.prod - args.mod);
2752         }
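             /*
              * Rough idea of what prod/mod buy us: with 1k filesystem
              * blocks and 4k pages, args.prod = 4 and args.mod is chosen
              * so that ap->off plus the allocated length lands on a
              * 4-block (page) boundary in file offset terms; with an
              * extent size hint, the same trick aligns the end of the
              * allocation to the hint instead.
              */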
2753         /*
2754          * If we are not low on available data blocks, and the
2755          * underlying logical volume manager is striped, and
2756          * the file offset is zero, then try to allocate data
2757          * blocks on a stripe unit boundary.
2758          * NOTE: ap->aeof is only set if the allocation length
2759          * is >= the stripe unit and the allocation offset is
2760          * at the end of file.
2761          */
2762         if (!ap->low && ap->aeof) {
2763                 if (!ap->off) {
2764                         args.alignment = mp->m_dalign;
2765                         atype = args.type;
2766                         isaligned = 1;
2767                         /*
2768                          * Adjust for alignment
2769                          */
2770                         if (blen > args.alignment && blen <= ap->alen)
2771                                 args.minlen = blen - args.alignment;
2772                         args.minalignslop = 0;
2773                 } else {
2774                         /*
2775                          * First try an exact bno allocation.
2776                          * If it fails then do a near or start bno
2777                          * allocation with alignment turned on.
2778                          */
2779                         atype = args.type;
2780                         tryagain = 1;
2781                         args.type = XFS_ALLOCTYPE_THIS_BNO;
2782                         args.alignment = 1;
2783                         /*
2784                          * Compute the minlen+alignment for the
2785                          * next case.  Set slop so that the value
2786                          * of minlen+alignment+slop doesn't go up
2787                          * between the calls.
2788                          */
2789                         if (blen > mp->m_dalign && blen <= ap->alen)
2790                                 nextminlen = blen - mp->m_dalign;
2791                         else
2792                                 nextminlen = args.minlen;
2793                         if (nextminlen + mp->m_dalign > args.minlen + 1)
2794                                 args.minalignslop =
2795                                         nextminlen + mp->m_dalign -
2796                                         args.minlen - 1;
2797                         else
2798                                 args.minalignslop = 0;
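                             /*
                              * Worked example (illustrative values): with
                              * a stripe unit of 8 blocks, args.minlen = 4
                              * and no better length seen (nextminlen = 4),
                              * minalignslop becomes 4 + 8 - 4 - 1 = 7.
                              * The aligned retry below may need up to
                              * nextminlen + alignment - 1 = 11 contiguous
                              * blocks, which is exactly minlen + slop, so
                              * the space reserved here does not grow
                              * between the two calls.
                              */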
2799                 }
2800         } else {
2801                 args.alignment = 1;
2802                 args.minalignslop = 0;
2803         }
2804         args.minleft = ap->minleft;
2805         args.wasdel = ap->wasdel;
2806         args.isfl = 0;
2807         args.userdata = ap->userdata;
2808         if ((error = xfs_alloc_vextent(&args)))
2809                 return error;
2810         if (tryagain && args.fsbno == NULLFSBLOCK) {
2811                 /*
2812                  * Exact allocation failed. Now try with alignment
2813                  * turned on.
2814                  */
2815                 args.type = atype;
2816                 args.fsbno = ap->rval;
2817                 args.alignment = mp->m_dalign;
2818                 args.minlen = nextminlen;
2819                 args.minalignslop = 0;
2820                 isaligned = 1;
2821                 if ((error = xfs_alloc_vextent(&args)))
2822                         return error;
2823         }
2824         if (isaligned && args.fsbno == NULLFSBLOCK) {
2825                 /*
2826                  * allocation failed, so turn off alignment and
2827                  * try again.
2828                  */
2829                 args.type = atype;
2830                 args.fsbno = ap->rval;
2831                 args.alignment = 0;
2832                 if ((error = xfs_alloc_vextent(&args)))
2833                         return error;
2834         }
2835         if (args.fsbno == NULLFSBLOCK && nullfb &&
2836             args.minlen > ap->minlen) {
2837                 args.minlen = ap->minlen;
2838                 args.type = XFS_ALLOCTYPE_START_BNO;
2839                 args.fsbno = ap->rval;
2840                 if ((error = xfs_alloc_vextent(&args)))
2841                         return error;
2842         }
2843         if (args.fsbno == NULLFSBLOCK && nullfb) {
2844                 args.fsbno = 0;
2845                 args.type = XFS_ALLOCTYPE_FIRST_AG;
2846                 args.total = ap->minlen;
2847                 args.minleft = 0;
2848                 if ((error = xfs_alloc_vextent(&args)))
2849                         return error;
2850                 ap->low = 1;
2851         }
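             /*
              * To summarize the fallback ladder above: an exact-bno
              * attempt is retried with stripe alignment, an aligned
              * attempt is retried unaligned, then (when no firstblock
              * constrains us) the minimum length is dropped to
              * ap->minlen, and finally we scan from the first AG with
              * minleft 0 and mark the allocation as running low on
              * space.
              */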
2852         if (args.fsbno != NULLFSBLOCK) {
2853                 ap->firstblock = ap->rval = args.fsbno;
2854                 ASSERT(nullfb || fb_agno == args.agno ||
2855                        (ap->low && fb_agno < args.agno));
2856                 ap->alen = args.len;
2857                 ap->ip->i_d.di_nblocks += args.len;
2858                 xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
2859                 if (ap->wasdel)
2860                         ap->ip->i_delayed_blks -= args.len;
2861                 /*
2862                  * Adjust the disk quota also. This was reserved
2863                  * earlier.
2864                  */
2865                 xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
2866                         ap->wasdel ? XFS_TRANS_DQ_DELBCOUNT :
2867                                         XFS_TRANS_DQ_BCOUNT,
2868                         (long) args.len);
2869         } else {
2870                 ap->rval = NULLFSBLOCK;
2871                 ap->alen = 0;
2872         }
2873         return 0;
2874 }
2875
2876 /*
2877  * xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file.
2878  * It figures out where to ask the underlying allocator to put the new extent.
2879  */
2880 STATIC int
2881 xfs_bmap_alloc(
2882         xfs_bmalloca_t  *ap)            /* bmap alloc argument struct */
2883 {
2884         if (XFS_IS_REALTIME_INODE(ap->ip) && ap->userdata)
2885                 return xfs_bmap_rtalloc(ap);
2886         return xfs_bmap_btalloc(ap);
2887 }
2888
2889 /*
2890  * Transform a btree format file with only one leaf node, where the
2891  * extents list will fit in the inode, into an extents format file.
2892  * Since the file extents are already in-core, all we have to do is
2893  * give up the space for the btree root and pitch the leaf block.
2894  */
2895 STATIC int                              /* error */
2896 xfs_bmap_btree_to_extents(
2897         xfs_trans_t             *tp,    /* transaction pointer */
2898         xfs_inode_t             *ip,    /* incore inode pointer */
2899         xfs_btree_cur_t         *cur,   /* btree cursor */
2900         int                     *logflagsp, /* inode logging flags */
2901         int                     whichfork)  /* data or attr fork */
2902 {
2903         /* REFERENCED */
2904         struct xfs_btree_block  *cblock;/* child btree block */
2905         xfs_fsblock_t           cbno;   /* child block number */
2906         xfs_buf_t               *cbp;   /* child block's buffer */
2907         int                     error;  /* error return value */
2908         xfs_ifork_t             *ifp;   /* inode fork data */
2909         xfs_mount_t             *mp;    /* mount point structure */
2910         __be64                  *pp;    /* ptr to block address */
2911         struct xfs_btree_block  *rblock;/* root btree block */
2912
2913         mp = ip->i_mount;
2914         ifp = XFS_IFORK_PTR(ip, whichfork);
2915         ASSERT(ifp->if_flags & XFS_IFEXTENTS);
2916         ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
2917         rblock = ifp->if_broot;
2918         ASSERT(be16_to_cpu(rblock->bb_level) == 1);
2919         ASSERT(be16_to_cpu(rblock->bb_numrecs) == 1);
2920         ASSERT(xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0) == 1);
2921         pp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, ifp->if_broot_bytes);
2922         cbno = be64_to_cpu(*pp);
2923         *logflagsp = 0;
2924 #ifdef DEBUG
2925         if ((error = xfs_btree_check_lptr(cur, cbno, 1)))
2926                 return error;
2927 #endif
2928         if ((error = xfs_btree_read_bufl(mp, tp, cbno, 0, &cbp,
2929                         XFS_BMAP_BTREE_REF)))
2930                 return error;
2931         cblock = XFS_BUF_TO_BLOCK(cbp);
2932         if ((error = xfs_btree_check_block(cur, cblock, 0, cbp)))
2933                 return error;
2934         xfs_bmap_add_free(cbno, 1, cur->bc_private.b.flist, mp);
2935         ip->i_d.di_nblocks--;
2936         xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
2937         xfs_trans_binval(tp, cbp);
2938         if (cur->bc_bufs[0] == cbp)
2939                 cur->bc_bufs[0] = NULL;
2940         xfs_iroot_realloc(ip, -1, whichfork);
2941         ASSERT(ifp->if_broot == NULL);
2942         ASSERT((ifp->if_flags & XFS_IFBROOT) == 0);
2943         XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
2944         *logflagsp = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
2945         return 0;
2946 }
2947
2948 /*
2949  * Called by xfs_bmapi to update file extent records and the btree
2950  * after removing space (or undoing a delayed allocation).
2951  */
2952 STATIC int                              /* error */
2953 xfs_bmap_del_extent(
2954         xfs_inode_t             *ip,    /* incore inode pointer */
2955         xfs_trans_t             *tp,    /* current transaction pointer */
2956         xfs_extnum_t            idx,    /* extent number to update/delete */
2957         xfs_bmap_free_t         *flist, /* list of extents to be freed */
2958         xfs_btree_cur_t         *cur,   /* if null, not a btree */
2959         xfs_bmbt_irec_t         *del,   /* data to remove from extents */
2960         int                     *logflagsp, /* inode logging flags */
2961         xfs_extdelta_t          *delta, /* Change made to incore extents */
2962         int                     whichfork, /* data or attr fork */
2963         int                     rsvd)   /* OK to allocate reserved blocks */
2964 {
2965         xfs_filblks_t           da_new; /* new delay-alloc indirect blocks */
2966         xfs_filblks_t           da_old; /* old delay-alloc indirect blocks */
2967         xfs_fsblock_t           del_endblock=0; /* first block past del */
2968         xfs_fileoff_t           del_endoff;     /* first offset past del */
2969         int                     delay;  /* current block is delayed allocated */
2970         int                     do_fx;  /* free extent at end of routine */
2971         xfs_bmbt_rec_host_t     *ep;    /* current extent entry pointer */
2972         int                     error;  /* error return value */
2973         int                     flags;  /* inode logging flags */
2974         xfs_bmbt_irec_t         got;    /* current extent entry */
2975         xfs_fileoff_t           got_endoff;     /* first offset past got */
2976         int                     i;      /* temp state */
2977         xfs_ifork_t             *ifp;   /* inode fork pointer */
2978         xfs_mount_t             *mp;    /* mount structure */
2979         xfs_filblks_t           nblks;  /* quota/sb block count */
2980         xfs_bmbt_irec_t         new;    /* new record to be inserted */
2981         /* REFERENCED */
2982         uint                    qfield; /* quota field to update */
2983         xfs_filblks_t           temp;   /* for indirect length calculations */
2984         xfs_filblks_t           temp2;  /* for indirect length calculations */
2985         int                     state = 0;
2986
2987         XFS_STATS_INC(xs_del_exlist);
2988
2989         if (whichfork == XFS_ATTR_FORK)
2990                 state |= BMAP_ATTRFORK;
2991
2992         mp = ip->i_mount;
2993         ifp = XFS_IFORK_PTR(ip, whichfork);
2994         ASSERT((idx >= 0) && (idx < ifp->if_bytes /
2995                 (uint)sizeof(xfs_bmbt_rec_t)));
2996         ASSERT(del->br_blockcount > 0);
2997         ep = xfs_iext_get_ext(ifp, idx);
2998         xfs_bmbt_get_all(ep, &got);
2999         ASSERT(got.br_startoff <= del->br_startoff);
3000         del_endoff = del->br_startoff + del->br_blockcount;
3001         got_endoff = got.br_startoff + got.br_blockcount;
3002         ASSERT(got_endoff >= del_endoff);
3003         delay = isnullstartblock(got.br_startblock);
3004         ASSERT(isnullstartblock(del->br_startblock) == delay);
3005         flags = 0;
3006         qfield = 0;
3007         error = 0;
3008         /*
3009          * If deleting a real allocation, must free up the disk space.
3010          */
3011         if (!delay) {
3012                 flags = XFS_ILOG_CORE;
3013                 /*
3014                  * Realtime allocation.  Free it and record di_nblocks update.
3015                  */
3016                 if (whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip)) {
3017                         xfs_fsblock_t   bno;
3018                         xfs_filblks_t   len;
3019
3020                         ASSERT(do_mod(del->br_blockcount,
3021                                       mp->m_sb.sb_rextsize) == 0);
3022                         ASSERT(do_mod(del->br_startblock,
3023                                       mp->m_sb.sb_rextsize) == 0);
3024                         bno = del->br_startblock;
3025                         len = del->br_blockcount;
3026                         do_div(bno, mp->m_sb.sb_rextsize);
3027                         do_div(len, mp->m_sb.sb_rextsize);
3028                         if ((error = xfs_rtfree_extent(ip->i_transp, bno,
3029                                         (xfs_extlen_t)len)))
3030                                 goto done;
3031                         do_fx = 0;
3032                         nblks = len * mp->m_sb.sb_rextsize;
3033                         qfield = XFS_TRANS_DQ_RTBCOUNT;
3034                 }
3035                 /*
3036                  * Ordinary allocation.
3037                  */
3038                 else {
3039                         do_fx = 1;
3040                         nblks = del->br_blockcount;
3041                         qfield = XFS_TRANS_DQ_BCOUNT;
3042                 }
3043                 /*
3044                  * Set up del_endblock and cur for later.
3045                  */
3046                 del_endblock = del->br_startblock + del->br_blockcount;
3047                 if (cur) {
3048                         if ((error = xfs_bmbt_lookup_eq(cur, got.br_startoff,
3049                                         got.br_startblock, got.br_blockcount,
3050                                         &i)))
3051                                 goto done;
3052                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
3053                 }
3054                 da_old = da_new = 0;
3055         } else {
3056                 da_old = startblockval(got.br_startblock);
3057                 da_new = 0;
3058                 nblks = 0;
3059                 do_fx = 0;
3060         }
3061         /*
3062          * Set flag value to use in switch statement.
3063          * Left-contig is 2, right-contig is 1.
3064          */
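             /*
              * In other words: case 3 means del covers all of got, case 2
              * removes the front of got, case 1 removes the back, and
              * case 0 punches a hole in the middle, splitting got in two.
              */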
3065         switch (((got.br_startoff == del->br_startoff) << 1) |
3066                 (got_endoff == del_endoff)) {
3067         case 3:
3068                 /*
3069                  * Matches the whole extent.  Delete the entry.
3070                  */
3071                 xfs_iext_remove(ip, idx, 1,
3072                                 whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0);
3073                 ifp->if_lastex = idx;
3074                 if (delay)
3075                         break;
3076                 XFS_IFORK_NEXT_SET(ip, whichfork,
3077                         XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
3078                 flags |= XFS_ILOG_CORE;
3079                 if (!cur) {
3080                         flags |= xfs_ilog_fext(whichfork);
3081                         break;
3082                 }
3083                 if ((error = xfs_btree_delete(cur, &i)))
3084                         goto done;
3085                 XFS_WANT_CORRUPTED_GOTO(i == 1, done);
3086                 break;
3087
3088         case 2:
3089                 /*
3090                  * Deleting the first part of the extent.
3091                  */
3092                 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
3093                 xfs_bmbt_set_startoff(ep, del_endoff);
3094                 temp = got.br_blockcount - del->br_blockcount;
3095                 xfs_bmbt_set_blockcount(ep, temp);
3096                 ifp->if_lastex = idx;
3097                 if (delay) {
3098                         temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
3099                                 da_old);
3100                         xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
3101                         trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
3102                         da_new = temp;
3103                         break;
3104                 }
3105                 xfs_bmbt_set_startblock(ep, del_endblock);
3106                 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
3107                 if (!cur) {
3108                         flags |= xfs_ilog_fext(whichfork);
3109                         break;
3110                 }
3111                 if ((error = xfs_bmbt_update(cur, del_endoff, del_endblock,
3112                                 got.br_blockcount - del->br_blockcount,
3113                                 got.br_state)))
3114                         goto done;
3115                 break;
3116
3117         case 1:
3118                 /*
3119                  * Deleting the last part of the extent.
3120                  */
3121                 temp = got.br_blockcount - del->br_blockcount;
3122                 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
3123                 xfs_bmbt_set_blockcount(ep, temp);
3124                 ifp->if_lastex = idx;
3125                 if (delay) {
3126                         temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
3127                                 da_old);
3128                         xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
3129                         trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
3130                         da_new = temp;
3131                         break;
3132                 }
3133                 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
3134                 if (!cur) {
3135                         flags |= xfs_ilog_fext(whichfork);
3136                         break;
3137                 }
3138                 if ((error = xfs_bmbt_update(cur, got.br_startoff,
3139                                 got.br_startblock,
3140                                 got.br_blockcount - del->br_blockcount,
3141                                 got.br_state)))
3142                         goto done;
3143                 break;
3144
3145         case 0:
3146                 /*
3147                  * Deleting the middle of the extent.
3148                  */
3149                 temp = del->br_startoff - got.br_startoff;
3150                 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
3151                 xfs_bmbt_set_blockcount(ep, temp);
3152                 new.br_startoff = del_endoff;
3153                 temp2 = got_endoff - del_endoff;
3154                 new.br_blockcount = temp2;
3155                 new.br_state = got.br_state;
3156                 if (!delay) {
3157                         new.br_startblock = del_endblock;
3158                         flags |= XFS_ILOG_CORE;
3159                         if (cur) {
3160                                 if ((error = xfs_bmbt_update(cur,
3161                                                 got.br_startoff,
3162                                                 got.br_startblock, temp,
3163                                                 got.br_state)))
3164                                         goto done;
3165                                 if ((error = xfs_btree_increment(cur, 0, &i)))
3166                                         goto done;
3167                                 cur->bc_rec.b = new;
3168                                 error = xfs_btree_insert(cur, &i);
3169                                 if (error && error != ENOSPC)
3170                                         goto done;
3171                                 /*
3172                                  * If we get ENOSPC back from the btree insert,
3173                                  * it tried a split, and we have a zero
3174                                  * block reservation.
3175                                  * Fix up our state and return the error.
3176                                  */
3177                                 if (error == ENOSPC) {
3178                                         /*
3179                                          * Reset the cursor, don't trust
3180                                          * it after any insert operation.
3181                                          */
3182                                         if ((error = xfs_bmbt_lookup_eq(cur,
3183                                                         got.br_startoff,
3184                                                         got.br_startblock,
3185                                                         temp, &i)))
3186                                                 goto done;
3187                                         XFS_WANT_CORRUPTED_GOTO(i == 1, done);
3188                                         /*
3189                                          * Update the btree record back
3190                                          * to the original value.
3191                                          */
3192                                         if ((error = xfs_bmbt_update(cur,
3193                                                         got.br_startoff,
3194                                                         got.br_startblock,
3195                                                         got.br_blockcount,
3196                                                         got.br_state)))
3197                                                 goto done;
3198                                         /*
3199                                          * Reset the extent record back
3200                                          * to the original value.
3201                                          */
3202                                         xfs_bmbt_set_blockcount(ep,
3203                                                 got.br_blockcount);
3204                                         flags = 0;
3205                                         error = XFS_ERROR(ENOSPC);
3206                                         goto done;
3207                                 }
3208                                 XFS_WANT_CORRUPTED_GOTO(i == 1, done);
3209                         } else
3210                                 flags |= xfs_ilog_fext(whichfork);
3211                         XFS_IFORK_NEXT_SET(ip, whichfork,
3212                                 XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
3213                 } else {
3214                         ASSERT(whichfork == XFS_DATA_FORK);
3215                         temp = xfs_bmap_worst_indlen(ip, temp);
3216                         xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
3217                         temp2 = xfs_bmap_worst_indlen(ip, temp2);
3218                         new.br_startblock = nullstartblock((int)temp2);
3219                         da_new = temp + temp2;
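                             /*
                              * Splitting one delalloc extent in two can
                              * ask for more worst-case indirect blocks
                              * than the original reservation (da_old)
                              * held.  No new reservation is taken here,
                              * so the loop below steals blocks back
                              * alternately from each half until the
                              * total fits within da_old again.
                              */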
3220                         while (da_new > da_old) {
3221                                 if (temp) {
3222                                         temp--;
3223                                         da_new--;
3224                                         xfs_bmbt_set_startblock(ep,
3225                                                 nullstartblock((int)temp));
3226                                 }
3227                                 if (da_new == da_old)
3228                                         break;
3229                                 if (temp2) {
3230                                         temp2--;
3231                                         da_new--;
3232                                         new.br_startblock =
3233                                                 nullstartblock((int)temp2);
3234                                 }
3235                         }
3236                 }
3237                 trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
3238                 xfs_iext_insert(ip, idx + 1, 1, &new, state);
3239                 ifp->if_lastex = idx + 1;
3240                 break;
3241         }
3242         /*
3243          * If we need to, add to list of extents to delete.
3244          */
3245         if (do_fx)
3246                 xfs_bmap_add_free(del->br_startblock, del->br_blockcount, flist,
3247                         mp);
3248         /*
3249          * Adjust inode # blocks in the file.
3250          */
3251         if (nblks)
3252                 ip->i_d.di_nblocks -= nblks;
3253         /*
3254          * Adjust quota data.
3255          */
3256         if (qfield)
3257                 xfs_trans_mod_dquot_byino(tp, ip, qfield, (long)-nblks);
3258
3259         /*
3260          * Account for change in delayed indirect blocks.
3261          * Nothing to do for disk quota accounting here.
3262          */
3263         ASSERT(da_old >= da_new);
3264         if (da_old > da_new)
3265                 xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS, (int64_t)(da_old - da_new),
3266                         rsvd);
3267         if (delta) {
3268                 /* DELTA: report the original extent. */
3269                 if (delta->xed_startoff > got.br_startoff)
3270                         delta->xed_startoff = got.br_startoff;
3271                 if (delta->xed_blockcount < got.br_startoff+got.br_blockcount)
3272                         delta->xed_blockcount = got.br_startoff +
3273                                                         got.br_blockcount;
3274         }
3275 done:
3276         *logflagsp = flags;
3277         return error;
3278 }
3279
3280 /*
3281  * Remove the entry "free" from the free item list.  Prev points to the
3282  * previous entry, unless "free" is the head of the list.
3283  */
3284 STATIC void
3285 xfs_bmap_del_free(
3286         xfs_bmap_free_t         *flist, /* free item list header */
3287         xfs_bmap_free_item_t    *prev,  /* previous item on list, if any */
3288         xfs_bmap_free_item_t    *free)  /* list item to be freed */
3289 {
3290         if (prev)
3291                 prev->xbfi_next = free->xbfi_next;
3292         else
3293                 flist->xbf_first = free->xbfi_next;
3294         flist->xbf_count--;
3295         kmem_zone_free(xfs_bmap_free_item_zone, free);
3296 }
3297
3298 /*
3299  * Convert an extents-format file into a btree-format file.
3300  * The new file will have a root block (in the inode) and a single child block.
3301  */
3302 STATIC int                                      /* error */
3303 xfs_bmap_extents_to_btree(
3304         xfs_trans_t             *tp,            /* transaction pointer */
3305         xfs_inode_t             *ip,            /* incore inode pointer */
3306         xfs_fsblock_t           *firstblock,    /* first-block-allocated */
3307         xfs_bmap_free_t         *flist,         /* blocks freed in xaction */
3308         xfs_btree_cur_t         **curp,         /* cursor returned to caller */
3309         int                     wasdel,         /* converting a delayed alloc */
3310         int                     *logflagsp,     /* inode logging flags */
3311         int                     whichfork)      /* data or attr fork */
3312 {
3313         struct xfs_btree_block  *ablock;        /* allocated (child) bt block */
3314         xfs_buf_t               *abp;           /* buffer for ablock */
3315         xfs_alloc_arg_t         args;           /* allocation arguments */
3316         xfs_bmbt_rec_t          *arp;           /* child record pointer */
3317         struct xfs_btree_block  *block;         /* btree root block */
3318         xfs_btree_cur_t         *cur;           /* bmap btree cursor */
3319         xfs_bmbt_rec_host_t     *ep;            /* extent record pointer */
3320         int                     error;          /* error return value */
3321         xfs_extnum_t            i, cnt;         /* extent record index */
3322         xfs_ifork_t             *ifp;           /* inode fork pointer */
3323         xfs_bmbt_key_t          *kp;            /* root block key pointer */
3324         xfs_mount_t             *mp;            /* mount structure */
3325         xfs_extnum_t            nextents;       /* number of file extents */
3326         xfs_bmbt_ptr_t          *pp;            /* root block address pointer */
3327
3328         ifp = XFS_IFORK_PTR(ip, whichfork);
3329         ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS);
3330         ASSERT(ifp->if_ext_max ==
3331                XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t));
3332         /*
3333          * Make space in the inode incore.
3334          */
3335         xfs_iroot_realloc(ip, 1, whichfork);
3336         ifp->if_flags |= XFS_IFBROOT;
3337
3338         /*
3339          * Fill in the root.
3340          */
3341         block = ifp->if_broot;
3342         block->bb_magic = cpu_to_be32(XFS_BMAP_MAGIC);
3343         block->bb_level = cpu_to_be16(1);
3344         block->bb_numrecs = cpu_to_be16(1);
3345         block->bb_u.l.bb_leftsib = cpu_to_be64(NULLDFSBNO);
3346         block->bb_u.l.bb_rightsib = cpu_to_be64(NULLDFSBNO);
3347
3348         /*
3349          * Need a cursor.  Can't allocate until bb_level is filled in.
3350          */
3351         mp = ip->i_mount;
3352         cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
3353         cur->bc_private.b.firstblock = *firstblock;
3354         cur->bc_private.b.flist = flist;
3355         cur->bc_private.b.flags = wasdel ? XFS_BTCUR_BPRV_WASDEL : 0;
3356         /*
3357          * Convert to a btree with two levels, one record in root.
3358          */
3359         XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_BTREE);
3360         args.tp = tp;
3361         args.mp = mp;
3362         args.firstblock = *firstblock;
3363         if (*firstblock == NULLFSBLOCK) {
3364                 args.type = XFS_ALLOCTYPE_START_BNO;
3365                 args.fsbno = XFS_INO_TO_FSB(mp, ip->i_ino);
3366         } else if (flist->xbf_low) {
3367                 args.type = XFS_ALLOCTYPE_START_BNO;
3368                 args.fsbno = *firstblock;
3369         } else {
3370                 args.type = XFS_ALLOCTYPE_NEAR_BNO;
3371                 args.fsbno = *firstblock;
3372         }
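             /*
              * That is: with no first block allocated yet, start the
              * search at the inode's block; if we are already in
              * low-space mode, search forward from the first block;
              * otherwise stay near the first block so this allocation
              * lands in the same AG as the rest of the transaction.
              */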
3373         args.minlen = args.maxlen = args.prod = 1;
3374         args.total = args.minleft = args.alignment = args.mod = args.isfl =
3375                 args.minalignslop = 0;
3376         args.wasdel = wasdel;
3377         *logflagsp = 0;
3378         if ((error = xfs_alloc_vextent(&args))) {
3379                 xfs_iroot_realloc(ip, -1, whichfork);
3380                 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
3381                 return error;
3382         }
3383         /*
3384          * Allocation can't fail, the space was reserved.
3385          */
3386         ASSERT(args.fsbno != NULLFSBLOCK);
3387         ASSERT(*firstblock == NULLFSBLOCK ||
3388                args.agno == XFS_FSB_TO_AGNO(mp, *firstblock) ||
3389                (flist->xbf_low &&
3390                 args.agno > XFS_FSB_TO_AGNO(mp, *firstblock)));
3391         *firstblock = cur->bc_private.b.firstblock = args.fsbno;
3392         cur->bc_private.b.allocated++;
3393         ip->i_d.di_nblocks++;
3394         xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
3395         abp = xfs_btree_get_bufl(mp, tp, args.fsbno, 0);
3396         /*
3397          * Fill in the child block.
3398          */
3399         ablock = XFS_BUF_TO_BLOCK(abp);
3400         ablock->bb_magic = cpu_to_be32(XFS_BMAP_MAGIC);
3401         ablock->bb_level = 0;
3402         ablock->bb_u.l.bb_leftsib = cpu_to_be64(NULLDFSBNO);
3403         ablock->bb_u.l.bb_rightsib = cpu_to_be64(NULLDFSBNO);
3404         arp = XFS_BMBT_REC_ADDR(mp, ablock, 1);
3405         nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
3406         for (cnt = i = 0; i < nextents; i++) {
3407                 ep = xfs_iext_get_ext(ifp, i);
3408                 if (!isnullstartblock(xfs_bmbt_get_startblock(ep))) {
3409                         arp->l0 = cpu_to_be64(ep->l0);
3410                         arp->l1 = cpu_to_be64(ep->l1);
3411                         arp++; cnt++;
3412                 }
3413         }
3414         ASSERT(cnt == XFS_IFORK_NEXTENTS(ip, whichfork));
3415         xfs_btree_set_numrecs(ablock, cnt);
3416
3417         /*
3418          * Fill in the root key and pointer.
3419          */
3420         kp = XFS_BMBT_KEY_ADDR(mp, block, 1);
3421         arp = XFS_BMBT_REC_ADDR(mp, ablock, 1);
3422         kp->br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(arp));
3423         pp = XFS_BMBT_PTR_ADDR(mp, block, 1, xfs_bmbt_get_maxrecs(cur,
3424                                                 be16_to_cpu(block->bb_level)));
3425         *pp = cpu_to_be64(args.fsbno);
3426
3427         /*
3428          * Do all this logging at the end so that
3429          * the root is at the right level.
3430          */
3431         xfs_btree_log_block(cur, abp, XFS_BB_ALL_BITS);
3432         xfs_btree_log_recs(cur, abp, 1, be16_to_cpu(ablock->bb_numrecs));
3433         ASSERT(*curp == NULL);
3434         *curp = cur;
3435         *logflagsp = XFS_ILOG_CORE | xfs_ilog_fbroot(whichfork);
3436         return 0;
3437 }
3438
3439 /*
3440  * Calculate the default attribute fork offset for newly created inodes.
3441  */
3442 uint
3443 xfs_default_attroffset(
3444         struct xfs_inode        *ip)
3445 {
3446         struct xfs_mount        *mp = ip->i_mount;
3447         uint                    offset;
3448
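	/*
	 * For the smallest (256 byte) inodes, give the data fork as much
	 * of the inode literal area as possible while leaving just enough
	 * room for a minimal attr fork btree root; larger inodes use a
	 * fixed data fork size instead.
	 */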
3449         if (mp->m_sb.sb_inodesize == 256) {
3450                 offset = XFS_LITINO(mp) -
3451                                 XFS_BMDR_SPACE_CALC(MINABTPTRS);
3452         } else {
3453                 offset = XFS_BMDR_SPACE_CALC(6 * MINABTPTRS);
3454         }
3455
3456         ASSERT(offset < XFS_LITINO(mp));
3457         return offset;
3458 }
3459
3460 /*
3461  * Helper routine to reset inode di_forkoff field when switching
3462  * attribute fork from local to extent format - we reset it where
3463  * possible to make space available for inline data fork extents.
3464  */
3465 STATIC void
3466 xfs_bmap_forkoff_reset(
3467         xfs_mount_t     *mp,
3468         xfs_inode_t     *ip,
3469         int             whichfork)
3470 {
3471         if (whichfork == XFS_ATTR_FORK &&
3472             ip->i_d.di_format != XFS_DINODE_FMT_DEV &&
3473             ip->i_d.di_format != XFS_DINODE_FMT_UUID &&
3474             ip->i_d.di_format != XFS_DINODE_FMT_BTREE) {
3475                 uint    dfl_forkoff = xfs_default_attroffset(ip) >> 3;
3476
3477                 if (dfl_forkoff > ip->i_d.di_forkoff) {
3478                         ip->i_d.di_forkoff = dfl_forkoff;
3479                         ip->i_df.if_ext_max =
3480                                 XFS_IFORK_DSIZE(ip) / sizeof(xfs_bmbt_rec_t);
3481                         ip->i_afp->if_ext_max =
3482                                 XFS_IFORK_ASIZE(ip) / sizeof(xfs_bmbt_rec_t);
3483                 }
3484         }
3485 }
3486
3487 /*
3488  * Convert a local file to an extents file.
3489  * This code cannot be used for the data fork of a regular file,
3490  * since the file data would need to be logged to keep things consistent.
3491  * (The bmap-level manipulations are ok, though).
3492  */
3493 STATIC int                              /* error */
3494 xfs_bmap_local_to_extents(
3495         xfs_trans_t     *tp,            /* transaction pointer */
3496         xfs_inode_t     *ip,            /* incore inode pointer */
3497         xfs_fsblock_t   *firstblock,    /* first block allocated in xaction */
3498         xfs_extlen_t    total,          /* total blocks needed by transaction */
3499         int             *logflagsp,     /* inode logging flags */
3500         int             whichfork)      /* data or attr fork */
3501 {
3502         int             error;          /* error return value */
3503         int             flags;          /* logging flags returned */
3504         xfs_ifork_t     *ifp;           /* inode fork pointer */
3505
3506         /*
3507          * We don't want to deal with the case of keeping inode data inline yet.
3508          * So passing the data fork of a regular inode is invalid.
3509          */
3510         ASSERT(!((ip->i_d.di_mode & S_IFMT) == S_IFREG &&
3511                  whichfork == XFS_DATA_FORK));
3512         ifp = XFS_IFORK_PTR(ip, whichfork);
3513         ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
3514         flags = 0;
3515         error = 0;
3516         if (ifp->if_bytes) {
3517                 xfs_alloc_arg_t args;   /* allocation arguments */
3518                 xfs_buf_t       *bp;    /* buffer for extent block */
3519                 xfs_bmbt_rec_host_t *ep;/* extent record pointer */
3520
3521                 args.tp = tp;
3522                 args.mp = ip->i_mount;
3523                 args.firstblock = *firstblock;
3524                 ASSERT((ifp->if_flags &
3525                         (XFS_IFINLINE|XFS_IFEXTENTS|XFS_IFEXTIREC)) == XFS_IFINLINE);
3526                 /*
3527                  * Allocate a block.  We know we need only one, since the
3528                  * file currently fits in an inode.
3529                  */
3530                 if (*firstblock == NULLFSBLOCK) {
3531                         args.fsbno = XFS_INO_TO_FSB(args.mp, ip->i_ino);
3532                         args.type = XFS_ALLOCTYPE_START_BNO;
3533                 } else {
3534                         args.fsbno = *firstblock;
3535                         args.type = XFS_ALLOCTYPE_NEAR_BNO;
3536                 }
3537                 args.total = total;
3538                 args.mod = args.minleft = args.alignment = args.wasdel =
3539                         args.isfl = args.minalignslop = 0;
3540                 args.minlen = args.maxlen = args.prod = 1;
3541                 if ((error = xfs_alloc_vextent(&args)))
3542                         goto done;
3543                 /*
3544                  * Can't fail, the space was reserved.
3545                  */
3546                 ASSERT(args.fsbno != NULLFSBLOCK);
3547                 ASSERT(args.len == 1);
3548                 *firstblock = args.fsbno;
3549                 bp = xfs_btree_get_bufl(args.mp, tp, args.fsbno, 0);
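		/*
		 * Copy the inline fork contents into the new block and log
		 * it, then free the inline data and replace it with a
		 * single in-core extent record pointing at the block.
		 */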
3550                 memcpy((char *)XFS_BUF_PTR(bp), ifp->if_u1.if_data,
3551                         ifp->if_bytes);
3552                 xfs_trans_log_buf(tp, bp, 0, ifp->if_bytes - 1);
3553                 xfs_bmap_forkoff_reset(args.mp, ip, whichfork);
3554                 xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
3555                 xfs_iext_add(ifp, 0, 1);
3556                 ep = xfs_iext_get_ext(ifp, 0);
3557                 xfs_bmbt_set_allf(ep, 0, args.fsbno, 1, XFS_EXT_NORM);
3558                 trace_xfs_bmap_post_update(ip, 0,
3559                                 whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0,
3560                                 _THIS_IP_);
3561                 XFS_IFORK_NEXT_SET(ip, whichfork, 1);
3562                 ip->i_d.di_nblocks = 1;
3563                 xfs_trans_mod_dquot_byino(tp, ip,
3564                         XFS_TRANS_DQ_BCOUNT, 1L);
3565                 flags |= xfs_ilog_fext(whichfork);
3566         } else {
3567                 ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) == 0);
3568                 xfs_bmap_forkoff_reset(ip->i_mount, ip, whichfork);
3569         }
3570         ifp->if_flags &= ~XFS_IFINLINE;
3571         ifp->if_flags |= XFS_IFEXTENTS;
3572         XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
3573         flags |= XFS_ILOG_CORE;
3574 done:
3575         *logflagsp = flags;
3576         return error;
3577 }
3578
3579 /*
3580  * Search the extent records for the entry containing block bno.
3581  * If bno lies in a hole, point to the next entry.  If bno lies
3582  * past eof, *eofp will be set, and *prevp will contain the last
3583  * entry (null if none).  Else, *lastxp will be set to the index
3584  * of the found entry; *gotp will contain the entry.
3585  */
3586 STATIC xfs_bmbt_rec_host_t *            /* pointer to found extent entry */
3587 xfs_bmap_search_multi_extents(
3588         xfs_ifork_t     *ifp,           /* inode fork pointer */
3589         xfs_fileoff_t   bno,            /* block number searched for */
3590         int             *eofp,          /* out: end of file found */
3591         xfs_extnum_t    *lastxp,        /* out: last extent index */
3592         xfs_bmbt_irec_t *gotp,          /* out: extent entry found */
3593         xfs_bmbt_irec_t *prevp)         /* out: previous extent entry found */
3594 {
3595         xfs_bmbt_rec_host_t *ep;                /* extent record pointer */
3596         xfs_extnum_t    lastx;          /* last extent index */
3597
3598         /*
3599          * Initialize the extent entry structure to catch access to
3600          * uninitialized br_startblock field.
3601          */
3602         gotp->br_startoff = 0xffa5a5a5a5a5a5a5LL;
3603         gotp->br_blockcount = 0xa55a5a5a5a5a5a5aLL;
3604         gotp->br_state = XFS_EXT_INVALID;
3605 #if XFS_BIG_BLKNOS
3606         gotp->br_startblock = 0xffffa5a5a5a5a5a5LL;
3607 #else
3608         gotp->br_startblock = 0xffffa5a5;
3609 #endif
3610         prevp->br_startoff = NULLFILEOFF;
3611
3612         ep = xfs_iext_bno_to_ext(ifp, bno, &lastx);
3613         if (lastx > 0) {
3614                 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, lastx - 1), prevp);
3615         }
3616         if (lastx < (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t))) {
3617                 xfs_bmbt_get_all(ep, gotp);
3618                 *eofp = 0;
3619         } else {
3620                 if (lastx > 0) {
3621                         *gotp = *prevp;
3622                 }
3623                 *eofp = 1;
3624                 ep = NULL;
3625         }
3626         *lastxp = lastx;
3627         return ep;
3628 }
3629
3630 /*
3631  * Search the extents list for the inode, for the extent containing bno.
3632  * If bno lies in a hole, point to the next entry.  If bno lies past eof,
3633  * *eofp will be set, and *prevp will contain the last entry (null if none).
3634  * Else, *lastxp will be set to the index of the found
3635  * entry; *gotp will contain the entry.
3636  */
3637 STATIC xfs_bmbt_rec_host_t *                 /* pointer to found extent entry */
3638 xfs_bmap_search_extents(
3639         xfs_inode_t     *ip,            /* incore inode pointer */
3640         xfs_fileoff_t   bno,            /* block number searched for */
3641         int             fork,           /* data or attr fork */
3642         int             *eofp,          /* out: end of file found */
3643         xfs_extnum_t    *lastxp,        /* out: last extent index */
3644         xfs_bmbt_irec_t *gotp,          /* out: extent entry found */
3645         xfs_bmbt_irec_t *prevp)         /* out: previous extent entry found */
3646 {
3647         xfs_ifork_t     *ifp;           /* inode fork pointer */
3648         xfs_bmbt_rec_host_t  *ep;            /* extent record pointer */
3649
3650         XFS_STATS_INC(xs_look_exlist);
3651         ifp = XFS_IFORK_PTR(ip, fork);
3652
3653         ep = xfs_bmap_search_multi_extents(ifp, bno, eofp, lastxp, gotp, prevp);
3654
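	/*
	 * A mapping that claims block zero is corrupt: on the data device
	 * block zero holds the superblock and can never belong to a file.
	 * Block zero of the realtime device is legitimate file data, hence
	 * the realtime data fork exception.
	 */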
3655         if (unlikely(!(gotp->br_startblock) && (*lastxp != NULLEXTNUM) &&
3656                      !(XFS_IS_REALTIME_INODE(ip) && fork == XFS_DATA_FORK))) {
3657                 xfs_cmn_err(XFS_PTAG_FSBLOCK_ZERO, CE_ALERT, ip->i_mount,
3658                                 "Access to block zero in inode %llu "
3659                                 "start_block: %llx start_off: %llx "
3660                                 "blkcnt: %llx extent-state: %x lastx: %x\n",
3661                         (unsigned long long)ip->i_ino,
3662                         (unsigned long long)gotp->br_startblock,
3663                         (unsigned long long)gotp->br_startoff,
3664                         (unsigned long long)gotp->br_blockcount,
3665                         gotp->br_state, *lastxp);
3666                 *lastxp = NULLEXTNUM;
3667                 *eofp = 1;
3668                 return NULL;
3669         }
3670         return ep;
3671 }
3672
3673 /*
3674  * Compute the worst-case number of indirect blocks that will be used
3675  * for ip's delayed extent of length "len".
3676  */
3677 STATIC xfs_filblks_t
3678 xfs_bmap_worst_indlen(
3679         xfs_inode_t     *ip,            /* incore inode pointer */
3680         xfs_filblks_t   len)            /* delayed extent length */
3681 {
3682         int             level;          /* btree level number */
3683         int             maxrecs;        /* maximum record count at this level */
3684         xfs_mount_t     *mp;            /* mount structure */
3685         xfs_filblks_t   rval;           /* return value */
3686
3687         mp = ip->i_mount;
3688         maxrecs = mp->m_bmap_dmxr[0];
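	/*
	 * Each btree level needs ceil(len / maxrecs) blocks to point at
	 * the level below it.  Once a level fits in a single block, every
	 * remaining level up to the maximum depth costs exactly one more
	 * block, which the early return accounts for.
	 */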
3689         for (level = 0, rval = 0;
3690              level < XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK);
3691              level++) {
3692                 len += maxrecs - 1;
3693                 do_div(len, maxrecs);
3694                 rval += len;
3695                 if (len == 1)
3696                         return rval + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) -
3697                                 level - 1;
3698                 if (level == 0)
3699                         maxrecs = mp->m_bmap_dmxr[1];
3700         }
3701         return rval;
3702 }
3703
3704 /*
3705  * Convert inode from non-attributed to attributed.
3706  * Must not be in a transaction, ip must not be locked.
3707  */
3708 int                                             /* error code */
3709 xfs_bmap_add_attrfork(
3710         xfs_inode_t             *ip,            /* incore inode pointer */
3711         int                     size,           /* space new attribute needs */
3712         int                     rsvd)           /* xact may use reserved blks */
3713 {
3714         xfs_fsblock_t           firstblock;     /* 1st block/ag allocated */
3715         xfs_bmap_free_t         flist;          /* freed extent records */
3716         xfs_mount_t             *mp;            /* mount structure */
3717         xfs_trans_t             *tp;            /* transaction pointer */
3718         int                     blks;           /* space reservation */
3719         int                     version = 1;    /* superblock attr version */
3720         int                     committed;      /* xaction was committed */
3721         int                     logflags;       /* logging flags */
3722         int                     error;          /* error return value */
3723
3724         ASSERT(XFS_IFORK_Q(ip) == 0);
3725         ASSERT(ip->i_df.if_ext_max ==
3726                XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t));
3727
3728         mp = ip->i_mount;
3729         ASSERT(!XFS_NOT_DQATTACHED(mp, ip));
3730         tp = xfs_trans_alloc(mp, XFS_TRANS_ADDAFORK);
3731         blks = XFS_ADDAFORK_SPACE_RES(mp);
3732         if (rsvd)
3733                 tp->t_flags |= XFS_TRANS_RESERVE;
3734         if ((error = xfs_trans_reserve(tp, blks, XFS_ADDAFORK_LOG_RES(mp), 0,
3735                         XFS_TRANS_PERM_LOG_RES, XFS_ADDAFORK_LOG_COUNT)))
3736                 goto error0;
3737         xfs_ilock(ip, XFS_ILOCK_EXCL);
3738         error = xfs_trans_reserve_quota_nblks(tp, ip, blks, 0, rsvd ?
3739                         XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES :
3740                         XFS_QMOPT_RES_REGBLKS);
3741         if (error) {
3742                 xfs_iunlock(ip, XFS_ILOCK_EXCL);
3743                 xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES);
3744                 return error;
3745         }
3746         if (XFS_IFORK_Q(ip))
3747                 goto error1;
3748         if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS) {
3749                 /*
3750                  * For inodes coming from pre-6.2 filesystems.
3751                  */
3752                 ASSERT(ip->i_d.di_aformat == 0);
3753                 ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
3754         }
3755         ASSERT(ip->i_d.di_anextents == 0);
3756         IHOLD(ip);
3757         xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
3758         xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
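	/*
	 * di_forkoff is stored in units of 8 bytes, hence the >> 3
	 * conversions from byte counts below.
	 */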
3759         switch (ip->i_d.di_format) {
3760         case XFS_DINODE_FMT_DEV:
3761                 ip->i_d.di_forkoff = roundup(sizeof(xfs_dev_t), 8) >> 3;
3762                 break;
3763         case XFS_DINODE_FMT_UUID:
3764                 ip->i_d.di_forkoff = roundup(sizeof(uuid_t), 8) >> 3;
3765                 break;
3766         case XFS_DINODE_FMT_LOCAL:
3767         case XFS_DINODE_FMT_EXTENTS:
3768         case XFS_DINODE_FMT_BTREE:
3769                 ip->i_d.di_forkoff = xfs_attr_shortform_bytesfit(ip, size);
3770                 if (!ip->i_d.di_forkoff)
3771                         ip->i_d.di_forkoff = xfs_default_attroffset(ip) >> 3;
3772                 else if (mp->m_flags & XFS_MOUNT_ATTR2)
3773                         version = 2;
3774                 break;
3775         default:
3776                 ASSERT(0);
3777                 error = XFS_ERROR(EINVAL);
3778                 goto error1;
3779         }
3780         ip->i_df.if_ext_max =
3781                 XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
3782         ASSERT(ip->i_afp == NULL);
3783         ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP);
3784         ip->i_afp->if_ext_max =
3785                 XFS_IFORK_ASIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
3786         ip->i_afp->if_flags = XFS_IFEXTENTS;
3787         logflags = 0;
3788         xfs_bmap_init(&flist, &firstblock);
3789         switch (ip->i_d.di_format) {
3790         case XFS_DINODE_FMT_LOCAL:
3791                 error = xfs_bmap_add_attrfork_local(tp, ip, &firstblock, &flist,
3792                         &logflags);
3793                 break;
3794         case XFS_DINODE_FMT_EXTENTS:
3795                 error = xfs_bmap_add_attrfork_extents(tp, ip, &firstblock,
3796                         &flist, &logflags);
3797                 break;
3798         case XFS_DINODE_FMT_BTREE:
3799                 error = xfs_bmap_add_attrfork_btree(tp, ip, &firstblock, &flist,
3800                         &logflags);
3801                 break;
3802         default:
3803                 error = 0;
3804                 break;
3805         }
3806         if (logflags)
3807                 xfs_trans_log_inode(tp, ip, logflags);
3808         if (error)
3809                 goto error2;
3810         if (!xfs_sb_version_hasattr(&mp->m_sb) ||
3811            (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2)) {
3812                 __int64_t sbfields = 0;
3813
3814                 spin_lock(&mp->m_sb_lock);
3815                 if (!xfs_sb_version_hasattr(&mp->m_sb)) {
3816                         xfs_sb_version_addattr(&mp->m_sb);
3817                         sbfields |= XFS_SB_VERSIONNUM;
3818                 }
3819                 if (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2) {
3820                         xfs_sb_version_addattr2(&mp->m_sb);
3821                         sbfields |= (XFS_SB_VERSIONNUM | XFS_SB_FEATURES2);
3822                 }
3823                 if (sbfields) {
3824                         spin_unlock(&mp->m_sb_lock);
3825                         xfs_mod_sb(tp, sbfields);
3826                 } else
3827                         spin_unlock(&mp->m_sb_lock);
3828         }
3829         if ((error = xfs_bmap_finish(&tp, &flist, &committed)))
3830                 goto error2;
3831         error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
3832         ASSERT(ip->i_df.if_ext_max ==
3833                XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t));
3834         return error;
3835 error2:
3836         xfs_bmap_cancel(&flist);
3837 error1:
3838         xfs_iunlock(ip, XFS_ILOCK_EXCL);
3839 error0:
3840         xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT);
3841         ASSERT(ip->i_df.if_ext_max ==
3842                XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t));
3843         return error;
3844 }
3845
3846 /*
3847  * Add the extent to the list of extents to be free at transaction end.
3848  * The list is maintained sorted (by block number).
3849  */
3850 /* ARGSUSED */
3851 void
3852 xfs_bmap_add_free(
3853         xfs_fsblock_t           bno,            /* fs block number of extent */
3854         xfs_filblks_t           len,            /* length of extent */
3855         xfs_bmap_free_t         *flist,         /* list of extents */
3856         xfs_mount_t             *mp)            /* mount point structure */
3857 {
3858         xfs_bmap_free_item_t    *cur;           /* current (next) element */
3859         xfs_bmap_free_item_t    *new;           /* new element */
3860         xfs_bmap_free_item_t    *prev;          /* previous element */
3861 #ifdef DEBUG
3862         xfs_agnumber_t          agno;
3863         xfs_agblock_t           agbno;
3864
3865         ASSERT(bno != NULLFSBLOCK);
3866         ASSERT(len > 0);
3867         ASSERT(len <= MAXEXTLEN);
3868         ASSERT(!isnullstartblock(bno));
3869         agno = XFS_FSB_TO_AGNO(mp, bno);
3870         agbno = XFS_FSB_TO_AGBNO(mp, bno);
3871         ASSERT(agno < mp->m_sb.sb_agcount);
3872         ASSERT(agbno < mp->m_sb.sb_agblocks);
3873         ASSERT(len < mp->m_sb.sb_agblocks);
3874         ASSERT(agbno + len <= mp->m_sb.sb_agblocks);
3875 #endif
3876         ASSERT(xfs_bmap_free_item_zone != NULL);
3877         new = kmem_zone_alloc(xfs_bmap_free_item_zone, KM_SLEEP);
3878         new->xbfi_startblock = bno;
3879         new->xbfi_blockcount = (xfs_extlen_t)len;
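	/*
	 * Keep the free list sorted by start block: find the first entry
	 * at or beyond the new extent and link the new item in front of it.
	 */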
3880         for (prev = NULL, cur = flist->xbf_first;
3881              cur != NULL;
3882              prev = cur, cur = cur->xbfi_next) {
3883                 if (cur->xbfi_startblock >= bno)
3884                         break;
3885         }
3886         if (prev)
3887                 prev->xbfi_next = new;
3888         else
3889                 flist->xbf_first = new;
3890         new->xbfi_next = cur;
3891         flist->xbf_count++;
3892 }
3893
3894 /*
3895  * Compute and fill in the value of the maximum depth of a bmap btree
3896  * in this filesystem.  Done once, during mount.
3897  */
3898 void
3899 xfs_bmap_compute_maxlevels(
3900         xfs_mount_t     *mp,            /* file system mount structure */
3901         int             whichfork)      /* data or attr fork */
3902 {
3903         int             level;          /* btree level */
3904         uint            maxblocks;      /* max blocks at this level */
3905         uint            maxleafents;    /* max leaf entries possible */
3906         int             maxrootrecs;    /* max records in root block */
3907         int             minleafrecs;    /* min records in leaf block */
3908         int             minnoderecs;    /* min records in node block */
3909         int             sz;             /* root block size */
3910
3911         /*
3912          * The maximum number of extents in a file, hence the maximum
3913          * number of leaf entries, is controlled by the type of di_nextents
3914          * (a signed 32-bit number, xfs_extnum_t), or by di_anextents
3915          * (a signed 16-bit number, xfs_aextnum_t).
3916          *
3917          * Note that we can no longer assume that if we are in ATTR1 that
3918          * the fork offset of all the inodes will be
3919          * (xfs_default_attroffset(ip) >> 3) because we could have mounted
3920          * with ATTR2 and then mounted back with ATTR1, keeping the
3921          * di_forkoff's fixed but probably at various positions. Therefore,
3922          * for both ATTR1 and ATTR2 we have to assume the worst case scenario
3923          * of a minimum size available.
3924          */
3925         if (whichfork == XFS_DATA_FORK) {
3926                 maxleafents = MAXEXTNUM;
3927                 sz = XFS_BMDR_SPACE_CALC(MINDBTPTRS);
3928         } else {
3929                 maxleafents = MAXAEXTNUM;
3930                 sz = XFS_BMDR_SPACE_CALC(MINABTPTRS);
3931         }
3932         maxrootrecs = xfs_bmdr_maxrecs(mp, sz, 0);
3933         minleafrecs = mp->m_bmap_dmnr[0];
3934         minnoderecs = mp->m_bmap_dmnr[1];
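	/*
	 * Start with the number of leaf blocks needed to hold the maximum
	 * possible extent count, then repeatedly divide by the minimum
	 * node fanout until everything fits in the root block; the number
	 * of divisions is the maximum btree height.
	 */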
3935         maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs;
3936         for (level = 1; maxblocks > 1; level++) {
3937                 if (maxblocks <= maxrootrecs)
3938                         maxblocks = 1;
3939                 else
3940                         maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs;
3941         }
3942         mp->m_bm_maxlevels[whichfork] = level;
3943 }
3944
3945 /*
3946  * Routine to be called at transaction's end by xfs_bmapi, xfs_bunmapi
3947  * caller.  Frees all the extents that need freeing, which must be done
3948  * last due to locking considerations.  We never free any extents in
3949  * the first transaction.  This is to allow the caller to make the first
3950  * transaction a synchronous one so that the pointers to the data being
3951  * broken in this transaction will be permanent before the data is actually
3952  * freed.  This is necessary to prevent blocks from being reallocated
3953  * and written to before the free and reallocation are actually permanent.
3954  * We do not just make the first transaction synchronous here, because
3955  * there are more efficient ways to gain the same protection in some cases
3956  * (see the file truncation code).
3957  *
3958  * Set *committed to 1 if the given transaction was committed and a new
3959  * one started, and to 0 otherwise.
3960  */
3961 /*ARGSUSED*/
3962 int                                             /* error */
3963 xfs_bmap_finish(
3964         xfs_trans_t             **tp,           /* transaction pointer addr */
3965         xfs_bmap_free_t         *flist,         /* i/o: list extents to free */
3966         int                     *committed)     /* xact committed or not */
3967 {
3968         xfs_efd_log_item_t      *efd;           /* extent free data */
3969         xfs_efi_log_item_t      *efi;           /* extent free intention */
3970         int                     error;          /* error return value */
3971         xfs_bmap_free_item_t    *free;          /* free extent item */
3972         unsigned int            logres;         /* new log reservation */
3973         unsigned int            logcount;       /* new log count */
3974         xfs_mount_t             *mp;            /* filesystem mount structure */
3975         xfs_bmap_free_item_t    *next;          /* next item on free list */
3976         xfs_trans_t             *ntp;           /* new transaction pointer */
3977
3978         ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES);
3979         if (flist->xbf_count == 0) {
3980                 *committed = 0;
3981                 return 0;
3982         }
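	/*
	 * Log an extent free intention (EFI) covering every extent on the
	 * list, commit that transaction via xfs_trans_dup(), and then do
	 * the actual frees in the duplicated transaction, logging a
	 * matching extent free done (EFD) for each one.
	 */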
3983         ntp = *tp;
3984         efi = xfs_trans_get_efi(ntp, flist->xbf_count);
3985         for (free = flist->xbf_first; free; free = free->xbfi_next)
3986                 xfs_trans_log_efi_extent(ntp, efi, free->xbfi_startblock,
3987                         free->xbfi_blockcount);
3988         logres = ntp->t_log_res;
3989         logcount = ntp->t_log_count;
3990         ntp = xfs_trans_dup(*tp);
3991         error = xfs_trans_commit(*tp, 0);
3992         *tp = ntp;
3993         *committed = 1;
3994         /*
3995          * We have a new transaction, so we should return committed=1,
3996          * even though we're returning an error.
3997          */
3998         if (error)
3999                 return error;
4000
4001         /*
4002          * transaction commit worked ok so we can drop the extra ticket
4003          * reference that we gained in xfs_trans_dup()
4004          */
4005         xfs_log_ticket_put(ntp->t_ticket);
4006
4007         if ((error = xfs_trans_reserve(ntp, 0, logres, 0, XFS_TRANS_PERM_LOG_RES,
4008                         logcount)))
4009                 return error;
4010         efd = xfs_trans_get_efd(ntp, efi, flist->xbf_count);
4011         for (free = flist->xbf_first; free != NULL; free = next) {
4012                 next = free->xbfi_next;
4013                 if ((error = xfs_free_extent(ntp, free->xbfi_startblock,
4014                                 free->xbfi_blockcount))) {
4015                         /*
4016                          * The bmap free list will be cleaned up at a
4017                          * higher level.  The EFI will be canceled when
4018                          * this transaction is aborted.
4019                          * Need to force shutdown here to make sure it
4020                          * happens, since this transaction may not be
4021                          * dirty yet.
4022                          */
4023                         mp = ntp->t_mountp;
4024                         if (!XFS_FORCED_SHUTDOWN(mp))
4025                                 xfs_force_shutdown(mp,
4026                                                    (error == EFSCORRUPTED) ?
4027                                                    SHUTDOWN_CORRUPT_INCORE :
4028                                                    SHUTDOWN_META_IO_ERROR);
4029                         return error;
4030                 }
4031                 xfs_trans_log_efd_extent(ntp, efd, free->xbfi_startblock,
4032                         free->xbfi_blockcount);
4033                 xfs_bmap_del_free(flist, NULL, free);
4034         }
4035         return 0;
4036 }
4037
4038 /*
4039  * Free up any items left in the list.
4040  */
4041 void
4042 xfs_bmap_cancel(
4043         xfs_bmap_free_t         *flist) /* list of bmap_free_items */
4044 {
4045         xfs_bmap_free_item_t    *free;  /* free list item */
4046         xfs_bmap_free_item_t    *next;
4047
4048         if (flist->xbf_count == 0)
4049                 return;
4050         ASSERT(flist->xbf_first != NULL);
4051         for (free = flist->xbf_first; free; free = next) {
4052                 next = free->xbfi_next;
4053                 xfs_bmap_del_free(flist, NULL, free);
4054         }
4055         ASSERT(flist->xbf_count == 0);
4056 }
4057
4058 /*
4059  * Returns the file-relative block number of the first unused block(s)
4060  * in the file with at least "len" logically contiguous blocks free.
4061  * This is the lowest-address hole if the file has holes, else the first block
4062  * past the end of file.
4063  * Return 0 if the file is currently local (in-inode).
4064  */
4065 int                                             /* error */
4066 xfs_bmap_first_unused(
4067         xfs_trans_t     *tp,                    /* transaction pointer */
4068         xfs_inode_t     *ip,                    /* incore inode */
4069         xfs_extlen_t    len,                    /* size of hole to find */
4070         xfs_fileoff_t   *first_unused,          /* unused block */
4071         int             whichfork)              /* data or attr fork */
4072 {
4073         int             error;                  /* error return value */
4074         int             idx;                    /* extent record index */
4075         xfs_ifork_t     *ifp;                   /* inode fork pointer */
4076         xfs_fileoff_t   lastaddr;               /* last block number seen */
4077         xfs_fileoff_t   lowest;                 /* lowest useful block */
4078         xfs_fileoff_t   max;                    /* starting useful block */
4079         xfs_fileoff_t   off;                    /* offset for this block */
4080         xfs_extnum_t    nextents;               /* number of extent entries */
4081
4082         ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE ||
4083                XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS ||
4084                XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
4085         if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
4086                 *first_unused = 0;
4087                 return 0;
4088         }
4089         ifp = XFS_IFORK_PTR(ip, whichfork);
4090         if (!(ifp->if_flags & XFS_IFEXTENTS) &&
4091             (error = xfs_iread_extents(tp, ip, whichfork)))
4092                 return error;
4093         lowest = *first_unused;
4094         nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
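	/*
	 * Walk the extents in file offset order.  "max" is the lowest
	 * candidate offset so far: the end of the last extent seen or the
	 * caller's starting offset, whichever is greater.  A hole is
	 * usable when the next extent begins at least "len" blocks
	 * beyond it.
	 */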
4095         for (idx = 0, lastaddr = 0, max = lowest; idx < nextents; idx++) {
4096                 xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, idx);
4097                 off = xfs_bmbt_get_startoff(ep);
4098                 /*
4099                  * See if the hole before this extent will work.
4100                  */
4101                 if (off >= lowest + len && off - max >= len) {
4102                         *first_unused = max;
4103                         return 0;
4104                 }
4105                 lastaddr = off + xfs_bmbt_get_blockcount(ep);
4106                 max = XFS_FILEOFF_MAX(lastaddr, lowest);
4107         }
4108         *first_unused = max;
4109         return 0;
4110 }
4111
4112 /*
4113  * Returns the file-relative block number of the last block + 1 before
4114  * last_block (input value) in the file.
4115  * This is not based on i_size, it is based on the extent records.
4116  * Returns 0 for local files, as they do not have extent records.
4117  */
4118 int                                             /* error */
4119 xfs_bmap_last_before(
4120         xfs_trans_t     *tp,                    /* transaction pointer */
4121         xfs_inode_t     *ip,                    /* incore inode */
4122         xfs_fileoff_t   *last_block,            /* last block */
4123         int             whichfork)              /* data or attr fork */
4124 {
4125         xfs_fileoff_t   bno;                    /* input file offset */
4126         int             eof;                    /* hit end of file */
4127         xfs_bmbt_rec_host_t *ep;                /* pointer to last extent */
4128         int             error;                  /* error return value */
4129         xfs_bmbt_irec_t got;                    /* current extent value */
4130         xfs_ifork_t     *ifp;                   /* inode fork pointer */
4131         xfs_extnum_t    lastx;                  /* last extent used */
4132         xfs_bmbt_irec_t prev;                   /* previous extent value */
4133
4134         if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE &&
4135             XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
4136             XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL)
4137                return XFS_ERROR(EIO);
4138         if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
4139                 *last_block = 0;
4140                 return 0;
4141         }
4142         ifp = XFS_IFORK_PTR(ip, whichfork);
4143         if (!(ifp->if_flags & XFS_IFEXTENTS) &&
4144             (error = xfs_iread_extents(tp, ip, whichfork)))
4145                 return error;
4146         bno = *last_block - 1;
4147         ep = xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got,
4148                 &prev);
4149         if (eof || xfs_bmbt_get_startoff(ep) > bno) {
4150                 if (prev.br_startoff == NULLFILEOFF)
4151                         *last_block = 0;
4152                 else
4153                         *last_block = prev.br_startoff + prev.br_blockcount;
4154         }
4155         /*
4156          * Otherwise *last_block is already the right answer.
4157          */
4158         return 0;
4159 }
4160
4161 /*
4162  * Returns the file-relative block number of the first block past eof in
4163  * the file.  This is not based on i_size, it is based on the extent records.
4164  * Returns 0 for local files, as they do not have extent records.
4165  */
4166 int                                             /* error */
4167 xfs_bmap_last_offset(
4168         xfs_trans_t     *tp,                    /* transaction pointer */
4169         xfs_inode_t     *ip,                    /* incore inode */
4170         xfs_fileoff_t   *last_block,            /* last block */
4171         int             whichfork)              /* data or attr fork */
4172 {
4173         xfs_bmbt_rec_host_t *ep;                /* pointer to last extent */
4174         int             error;                  /* error return value */
4175         xfs_ifork_t     *ifp;                   /* inode fork pointer */
4176         xfs_extnum_t    nextents;               /* number of extent entries */
4177
4178         if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE &&
4179             XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
4180             XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL)
4181                return XFS_ERROR(EIO);
4182         if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
4183                 *last_block = 0;
4184                 return 0;
4185         }
4186         ifp = XFS_IFORK_PTR(ip, whichfork);
4187         if (!(ifp->if_flags & XFS_IFEXTENTS) &&
4188             (error = xfs_iread_extents(tp, ip, whichfork)))
4189                 return error;
4190         nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
4191         if (!nextents) {
4192                 *last_block = 0;
4193                 return 0;
4194         }
4195         ep = xfs_iext_get_ext(ifp, nextents - 1);
4196         *last_block = xfs_bmbt_get_startoff(ep) + xfs_bmbt_get_blockcount(ep);
4197         return 0;
4198 }
4199
4200 /*
4201  * Returns whether the selected fork of the inode has exactly one
4202  * block or not.  For the data fork we check this matches di_size,
4203  * implying the file's range is 0..bsize-1.
4204  */
4205 int                                     /* 1=>1 block, 0=>otherwise */
4206 xfs_bmap_one_block(
4207         xfs_inode_t     *ip,            /* incore inode */
4208         int             whichfork)      /* data or attr fork */
4209 {
4210         xfs_bmbt_rec_host_t *ep;        /* ptr to fork's extent */
4211         xfs_ifork_t     *ifp;           /* inode fork pointer */
4212         int             rval;           /* return value */
4213         xfs_bmbt_irec_t s;              /* internal version of extent */
4214
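	/*
	 * In non-DEBUG builds the data fork case reduces to a simple size
	 * comparison; DEBUG builds fall through and cross-check the answer
	 * against the extent record itself.
	 */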
4215 #ifndef DEBUG
4216         if (whichfork == XFS_DATA_FORK) {
4217                 return ((ip->i_d.di_mode & S_IFMT) == S_IFREG) ?
4218                         (ip->i_size == ip->i_mount->m_sb.sb_blocksize) :
4219                         (ip->i_d.di_size == ip->i_mount->m_sb.sb_blocksize);
4220         }
4221 #endif  /* !DEBUG */
4222         if (XFS_IFORK_NEXTENTS(ip, whichfork) != 1)
4223                 return 0;
4224         if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
4225                 return 0;
4226         ifp = XFS_IFORK_PTR(ip, whichfork);
4227         ASSERT(ifp->if_flags & XFS_IFEXTENTS);
4228         ep = xfs_iext_get_ext(ifp, 0);
4229         xfs_bmbt_get_all(ep, &s);
4230         rval = s.br_startoff == 0 && s.br_blockcount == 1;
4231         if (rval && whichfork == XFS_DATA_FORK)
4232                 ASSERT(ip->i_size == ip->i_mount->m_sb.sb_blocksize);
4233         return rval;
4234 }
4235
4236 STATIC int
4237 xfs_bmap_sanity_check(
4238         struct xfs_mount        *mp,
4239         struct xfs_buf          *bp,
4240         int                     level)
4241 {
4242         struct xfs_btree_block  *block = XFS_BUF_TO_BLOCK(bp);
4243
4244         if (be32_to_cpu(block->bb_magic) != XFS_BMAP_MAGIC ||
4245             be16_to_cpu(block->bb_level) != level ||
4246             be16_to_cpu(block->bb_numrecs) == 0 ||
4247             be16_to_cpu(block->bb_numrecs) > mp->m_bmap_dmxr[level != 0])
4248                 return 0;
4249         return 1;
4250 }
4251
4252 /*
4253  * Read in the extents to if_extents.
4254  * All inode fields are set up by caller, we just traverse the btree
4255  * and copy the records in. If the file system cannot contain unwritten
4256  * extents, the records are checked to ensure no "state" (unwritten) flag is set.
4257  */
4258 int                                     /* error */
4259 xfs_bmap_read_extents(
4260         xfs_trans_t             *tp,    /* transaction pointer */
4261         xfs_inode_t             *ip,    /* incore inode */
4262         int                     whichfork) /* data or attr fork */
4263 {
4264         struct xfs_btree_block  *block; /* current btree block */
4265         xfs_fsblock_t           bno;    /* block # of "block" */
4266         xfs_buf_t               *bp;    /* buffer for "block" */
4267         int                     error;  /* error return value */
4268         xfs_exntfmt_t           exntf;  /* XFS_EXTFMT_NOSTATE, if checking */
4269         xfs_extnum_t            i, j;   /* index into the extents list */
4270         xfs_ifork_t             *ifp;   /* fork structure */
4271         int                     level;  /* btree level, for checking */
4272         xfs_mount_t             *mp;    /* file system mount structure */
4273         __be64                  *pp;    /* pointer to block address */
4274         /* REFERENCED */
4275         xfs_extnum_t            room;   /* number of entries there's room for */
4276
4277         bno = NULLFSBLOCK;
4278         mp = ip->i_mount;
4279         ifp = XFS_IFORK_PTR(ip, whichfork);
4280         exntf = (whichfork != XFS_DATA_FORK) ? XFS_EXTFMT_NOSTATE :
4281                                         XFS_EXTFMT_INODE(ip);
4282         block = ifp->if_broot;
4283         /*
4284          * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
4285          */
4286         level = be16_to_cpu(block->bb_level);
4287         ASSERT(level > 0);
4288         pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
4289         bno = be64_to_cpu(*pp);
4290         ASSERT(bno != NULLDFSBNO);
4291         ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
4292         ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);
4293         /*
4294          * Go down the tree until leaf level is reached, following the first
4295          * pointer (leftmost) at each level.
4296          */
4297         while (level-- > 0) {
4298                 if ((error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
4299                                 XFS_BMAP_BTREE_REF)))
4300                         return error;
4301                 block = XFS_BUF_TO_BLOCK(bp);
4302                 XFS_WANT_CORRUPTED_GOTO(
4303                         xfs_bmap_sanity_check(mp, bp, level),
4304                         error0);
4305                 if (level == 0)
4306                         break;
4307                 pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
4308                 bno = be64_to_cpu(*pp);
4309                 XFS_WANT_CORRUPTED_GOTO(XFS_FSB_SANITY_CHECK(mp, bno), error0);
4310                 xfs_trans_brelse(tp, bp);
4311         }
4312         /*
4313          * Here with bp and block set to the leftmost leaf node in the tree.
4314          */
4315         room = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
4316         i = 0;
4317         /*
4318          * Loop over all leaf nodes.  Copy information to the extent records.
4319          */
4320         for (;;) {
4321                 xfs_bmbt_rec_t  *frp;
4322                 xfs_fsblock_t   nextbno;
4323                 xfs_extnum_t    num_recs;
4324                 xfs_extnum_t    start;
4325
4326
4327                 num_recs = xfs_btree_get_numrecs(block);
4328                 if (unlikely(i + num_recs > room)) {
4329                         ASSERT(i + num_recs <= room);
4330                         xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
4331                                 "corrupt dinode %Lu, (btree extents).",
4332                                 (unsigned long long) ip->i_ino);
4333                         XFS_ERROR_REPORT("xfs_bmap_read_extents(1)",
4334                                          XFS_ERRLEVEL_LOW,
4335                                         ip->i_mount);
4336                         goto error0;
4337                 }
4338                 XFS_WANT_CORRUPTED_GOTO(
4339                         xfs_bmap_sanity_check(mp, bp, 0),
4340                         error0);
4341                 /*
4342                  * Read-ahead the next leaf block, if any.
4343                  */
4344                 nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
4345                 if (nextbno != NULLFSBLOCK)
4346                         xfs_btree_reada_bufl(mp, nextbno, 1);
4347                 /*
4348                  * Copy records into the extent records.
4349                  */
4350                 frp = XFS_BMBT_REC_ADDR(mp, block, 1);
4351                 start = i;
4352                 for (j = 0; j < num_recs; j++, i++, frp++) {
4353                         xfs_bmbt_rec_host_t *trp = xfs_iext_get_ext(ifp, i);
4354                         trp->l0 = be64_to_cpu(frp->l0);
4355                         trp->l1 = be64_to_cpu(frp->l1);
4356                 }
4357                 if (exntf == XFS_EXTFMT_NOSTATE) {
4358                         /*
4359                          * Check all attribute bmap btree records and
4360                          * any "older" data bmap btree records for a
4361                          * set bit in the "extent flag" position.
4362                          */
4363                         if (unlikely(xfs_check_nostate_extents(ifp,
4364                                         start, num_recs))) {
4365                                 XFS_ERROR_REPORT("xfs_bmap_read_extents(2)",
4366                                                  XFS_ERRLEVEL_LOW,
4367                                                  ip->i_mount);
4368                                 goto error0;
4369                         }
4370                 }
4371                 xfs_trans_brelse(tp, bp);
4372                 bno = nextbno;
4373                 /*
4374                  * If we've reached the end, stop.
4375                  */
4376                 if (bno == NULLFSBLOCK)
4377                         break;
4378                 if ((error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
4379                                 XFS_BMAP_BTREE_REF)))
4380                         return error;
4381                 block = XFS_BUF_TO_BLOCK(bp);
4382         }
4383         ASSERT(i == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)));
4384         ASSERT(i == XFS_IFORK_NEXTENTS(ip, whichfork));
4385         XFS_BMAP_TRACE_EXLIST(ip, i, whichfork);
4386         return 0;
4387 error0:
4388         xfs_trans_brelse(tp, bp);
4389         return XFS_ERROR(EFSCORRUPTED);
4390 }
4391
4392 #ifdef DEBUG
4393 /*
4394  * Add bmap trace insert entries for all the contents of the extent records.
4395  */
4396 void
4397 xfs_bmap_trace_exlist(
4398         xfs_inode_t     *ip,            /* incore inode pointer */
4399         xfs_extnum_t    cnt,            /* count of entries in the list */
4400         int             whichfork,      /* data or attr fork */
4401         unsigned long   caller_ip)
4402 {
4403         xfs_extnum_t    idx;            /* extent record index */
4404         xfs_ifork_t     *ifp;           /* inode fork pointer */
4405         int             state = 0;
4406
4407         if (whichfork == XFS_ATTR_FORK)
4408                 state |= BMAP_ATTRFORK;
4409
4410         ifp = XFS_IFORK_PTR(ip, whichfork);
4411         ASSERT(cnt == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)));
4412         for (idx = 0; idx < cnt; idx++)
4413                 trace_xfs_extlist(ip, idx, whichfork, caller_ip);
4414 }
4415
4416 /*
4417  * Validate that the bmbt_irecs being returned from bmapi are valid
4418  * given the callers original parameters.  Specifically check the
4419  * ranges of the returned irecs to ensure that they only extend beyond
4420  * the given parameters if the XFS_BMAPI_ENTIRE flag was set.
4421  */
4422 STATIC void
4423 xfs_bmap_validate_ret(
4424         xfs_fileoff_t           bno,
4425         xfs_filblks_t           len,
4426         int                     flags,
4427         xfs_bmbt_irec_t         *mval,
4428         int                     nmap,
4429         int                     ret_nmap)
4430 {
4431         int                     i;              /* index to map values */
4432
4433         ASSERT(ret_nmap <= nmap);
4434
4435         for (i = 0; i < ret_nmap; i++) {
4436                 ASSERT(mval[i].br_blockcount > 0);
4437                 if (!(flags & XFS_BMAPI_ENTIRE)) {
4438                         ASSERT(mval[i].br_startoff >= bno);
4439                         ASSERT(mval[i].br_blockcount <= len);
4440                         ASSERT(mval[i].br_startoff + mval[i].br_blockcount <=
4441                                bno + len);
4442                 } else {
4443                         ASSERT(mval[i].br_startoff < bno + len);
4444                         ASSERT(mval[i].br_startoff + mval[i].br_blockcount >
4445                                bno);
4446                 }
4447                 ASSERT(i == 0 ||
4448                        mval[i - 1].br_startoff + mval[i - 1].br_blockcount ==
4449                        mval[i].br_startoff);
4450                 if ((flags & XFS_BMAPI_WRITE) && !(flags & XFS_BMAPI_DELAY))
4451                         ASSERT(mval[i].br_startblock != DELAYSTARTBLOCK &&
4452                                mval[i].br_startblock != HOLESTARTBLOCK);
4453                 ASSERT(mval[i].br_state == XFS_EXT_NORM ||
4454                        mval[i].br_state == XFS_EXT_UNWRITTEN);
4455         }
4456 }
4457 #endif /* DEBUG */
4458
4459
4460 /*
4461  * Map file blocks to filesystem blocks.
4462  * File range is given by the bno/len pair.
4463  * Adds blocks to the file if this is a write ("flags & XFS_BMAPI_WRITE"
4464  * set) into a hole or past eof.
4465  * Only allocates blocks from a single allocation group,
4466  * to avoid locking problems.
4467  * The returned value in "firstblock" from the first call in a transaction
4468  * must be remembered and presented to subsequent calls in "firstblock".
4469  * An upper bound for the number of blocks to be allocated is supplied to
4470  * the first call in "total"; if no allocation group has that many free
4471  * blocks then the call will fail (return NULLFSBLOCK in "firstblock").
4472  */
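/*
 * A sketch of a typical read-only mapping call (illustrative only; the
 * local variable names below are not from this file):
 *
 *	xfs_bmbt_irec_t	map[2];
 *	int		nmap = 2;
 *	int		error;
 *
 *	error = xfs_bmapi(NULL, ip, offset_fsb, count_fsb, 0,
 *			  NULL, 0, map, &nmap, NULL, NULL);
 *
 * With no XFS_BMAPI_WRITE flag there is no allocation, so no transaction,
 * firstblock or free list is needed.  On return *nmap holds the number of
 * mappings filled in; holes are reported with
 * br_startblock == HOLESTARTBLOCK.
 */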
4473 int                                     /* error */
4474 xfs_bmapi(
4475         xfs_trans_t     *tp,            /* transaction pointer */
4476         xfs_inode_t     *ip,            /* incore inode */
4477         xfs_fileoff_t   bno,            /* starting file offs. mapped */
4478         xfs_filblks_t   len,            /* length to map in file */
4479         int             flags,          /* XFS_BMAPI_... */
4480         xfs_fsblock_t   *firstblock,    /* first allocated block
4481                                            controls a.g. for allocs */
4482         xfs_extlen_t    total,          /* total blocks needed */
4483         xfs_bmbt_irec_t *mval,          /* output: map values */
4484         int             *nmap,          /* i/o: mval size/count */
4485         xfs_bmap_free_t *flist,         /* i/o: list extents to free */
4486         xfs_extdelta_t  *delta)         /* o: change made to incore extents */
4487 {
4488         xfs_fsblock_t   abno;           /* allocated block number */
4489         xfs_extlen_t    alen;           /* allocated extent length */
4490         xfs_fileoff_t   aoff;           /* allocated file offset */
4491         xfs_bmalloca_t  bma = { 0 };    /* args for xfs_bmap_alloc */
4492         xfs_btree_cur_t *cur;           /* bmap btree cursor */
4493         xfs_fileoff_t   end;            /* end of mapped file region */
4494         int             eof;            /* we've hit the end of extents */
4495         xfs_bmbt_rec_host_t *ep;        /* extent record pointer */
4496         int             error;          /* error return */
4497         xfs_bmbt_irec_t got;            /* current file extent record */
4498         xfs_ifork_t     *ifp;           /* inode fork pointer */
4499         xfs_extlen_t    indlen;         /* indirect blocks length */
4500         xfs_extnum_t    lastx;          /* last useful extent number */
4501         int             logflags;       /* flags for transaction logging */
4502         xfs_extlen_t    minleft;        /* min blocks left after allocation */
4503         xfs_extlen_t    minlen;         /* min allocation size */
4504         xfs_mount_t     *mp;            /* xfs mount structure */
4505         int             n;              /* current extent index */
4506         int             nallocs;        /* number of extents alloc'd */
4507         xfs_extnum_t    nextents;       /* number of extents in file */
4508         xfs_fileoff_t   obno;           /* old block number (offset) */
4509         xfs_bmbt_irec_t prev;           /* previous file extent record */
4510         int             tmp_logflags;   /* temp flags holder */
4511         int             whichfork;      /* data or attr fork */
4512         char            inhole;         /* current location is hole in file */
4513         char            wasdelay;       /* old extent was delayed */
4514         char            wr;             /* this is a write request */
4515         char            rt;             /* this is a realtime file */
4516 #ifdef DEBUG
4517         xfs_fileoff_t   orig_bno;       /* original block number value */
4518         int             orig_flags;     /* original flags arg value */
4519         xfs_filblks_t   orig_len;       /* original value of len arg */
4520         xfs_bmbt_irec_t *orig_mval;     /* original value of mval */
4521         int             orig_nmap;      /* original value of *nmap */
4522
4523         orig_bno = bno;
4524         orig_len = len;
4525         orig_flags = flags;
4526         orig_mval = mval;
4527         orig_nmap = *nmap;
4528 #endif
4529         ASSERT(*nmap >= 1);
4530         ASSERT(*nmap <= XFS_BMAP_MAX_NMAP || !(flags & XFS_BMAPI_WRITE));
4531         whichfork = (flags & XFS_BMAPI_ATTRFORK) ?
4532                 XFS_ATTR_FORK : XFS_DATA_FORK;
4533         mp = ip->i_mount;
4534         if (unlikely(XFS_TEST_ERROR(
4535             (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
4536              XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE &&
4537              XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL),
4538              mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) {
4539                 XFS_ERROR_REPORT("xfs_bmapi", XFS_ERRLEVEL_LOW, mp);
4540                 return XFS_ERROR(EFSCORRUPTED);
4541         }
4542         if (XFS_FORCED_SHUTDOWN(mp))
4543                 return XFS_ERROR(EIO);
4544         rt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
4545         ifp = XFS_IFORK_PTR(ip, whichfork);
4546         ASSERT(ifp->if_ext_max ==
4547                XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t));
4548         if ((wr = (flags & XFS_BMAPI_WRITE)) != 0)
4549                 XFS_STATS_INC(xs_blk_mapw);
4550         else
4551                 XFS_STATS_INC(xs_blk_mapr);
4552         /*
4553          * IGSTATE flag is used to combine extents which
4554          * differ only due to the state of the extents.
4555          * This technique is used from xfs_getbmap()
4556          * when the caller does not wish to see the
4557          * separation (which is the default).
4558          *
4559          * This technique is also used when writing a
4560          * buffer which has been partially written,
4561          * (usually by being flushed during a chunkread),
4562          * to ensure one write takes place. This also
4563          * prevents a change in the xfs inode extents at
4564          * this time, intentionally. This change occurs
4565          * on completion of the write operation, in
4566          * xfs_strat_comp(), where the xfs_bmapi() call
4567          * is transactioned, and the extents combined.
4568          */
4569         if ((flags & XFS_BMAPI_IGSTATE) && wr)  /* if writing unwritten space */
4570                 wr = 0;                         /* no allocations are allowed */
4571         ASSERT(wr || !(flags & XFS_BMAPI_DELAY));
4572         logflags = 0;
4573         nallocs = 0;
4574         cur = NULL;
4575         if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
4576                 ASSERT(wr && tp);
4577                 if ((error = xfs_bmap_local_to_extents(tp, ip,
4578                                 firstblock, total, &logflags, whichfork)))
4579                         goto error0;
4580         }
4581         if (wr && *firstblock == NULLFSBLOCK) {
4582                 if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE)
4583                         minleft = be16_to_cpu(ifp->if_broot->bb_level) + 1;
4584                 else
4585                         minleft = 1;
4586         } else
4587                 minleft = 0;
4588         if (!(ifp->if_flags & XFS_IFEXTENTS) &&
4589             (error = xfs_iread_extents(tp, ip, whichfork)))
4590                 goto error0;
4591         ep = xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got,
4592                 &prev);
4593         nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
4594         n = 0;
4595         end = bno + len;
4596         obno = bno;
4597         bma.ip = NULL;
4598         if (delta) {
4599                 delta->xed_startoff = NULLFILEOFF;
4600                 delta->xed_blockcount = 0;
4601         }
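	/*
	 * Main mapping loop: walk the range [bno, end), filling in one
	 * mval entry per hole or extent.  For write requests, space is
	 * allocated (or delayed-allocated) in holes before the mapping
	 * is reported.
	 */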
4602         while (bno < end && n < *nmap) {
4603                 /*
4604                  * Reading past eof, act as though there's a hole
4605                  * up to end.
4606                  */
4607                 if (eof && !wr)
4608                         got.br_startoff = end;
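                     /*
                      * inhole: bno falls in a gap before the next extent,
                      * or past the last extent in the file.  wasdelay: we
                      * are writing over a delayed allocation that now
                      * needs real blocks assigned to it.
                      */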
4609                 inhole = eof || got.br_startoff > bno;
4610                 wasdelay = wr && !inhole && !(flags & XFS_BMAPI_DELAY) &&
4611                         isnullstartblock(got.br_startblock);
4612                 /*
4613                  * First, deal with the hole before the allocated space
4614                  * that we found, if any.
4615                  */
4616                 if (wr && (inhole || wasdelay)) {
4617                         /*
4618                          * For the wasdelay case, we could also just
4619                          * allocate the stuff asked for in this bmap call
4620                          * but that wouldn't be as good.
4621                          */
4622                         if (wasdelay && !(flags & XFS_BMAPI_EXACT)) {
4623                                 alen = (xfs_extlen_t)got.br_blockcount;
4624                                 aoff = got.br_startoff;
4625                                 if (lastx != NULLEXTNUM && lastx) {
4626                                         ep = xfs_iext_get_ext(ifp, lastx - 1);
4627                                         xfs_bmbt_get_all(ep, &prev);
4628                                 }
4629                         } else if (wasdelay) {
4630                                 alen = (xfs_extlen_t)
4631                                         XFS_FILBLKS_MIN(len,
4632                                                 (got.br_startoff +
4633                                                  got.br_blockcount) - bno);
4634                                 aoff = bno;
4635                         } else {
4636                                 alen = (xfs_extlen_t)
4637                                         XFS_FILBLKS_MIN(len, MAXEXTLEN);
4638                                 if (!eof)
4639                                         alen = (xfs_extlen_t)
4640                                                 XFS_FILBLKS_MIN(alen,
4641                                                         got.br_startoff - bno);
4642                                 aoff = bno;
4643                         }
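                             /*
                              * XFS_BMAPI_CONTIG callers need the whole
                              * request satisfied by a single extent, so
                              * the minimum acceptable allocation is the
                              * full length.
                              */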
4644                         minlen = (flags & XFS_BMAPI_CONTIG) ? alen : 1;
4645                         if (flags & XFS_BMAPI_DELAY) {
4646                                 xfs_extlen_t    extsz;
4647
4648                                 /* Figure out the extent size, adjust alen */
4649                                 extsz = xfs_get_extsz_hint(ip);
4650                                 if (extsz) {
4651                                         error = xfs_bmap_extsize_align(mp,
4652                                                         &got, &prev, extsz,
4653                                                         rt, eof,
4654                                                         flags&XFS_BMAPI_DELAY,
4655                                                         flags&XFS_BMAPI_CONVERT,
4656                                                         &aoff, &alen);
4657                                         ASSERT(!error);
4658                                 }
4659
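                                     /*
                                      * Reuse extsz to hold the length in
                                      * realtime extents; the rt free
                                      * space counter below is kept in
                                      * units of rt extents, not blocks.
                                      */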
4660                                 if (rt)
4661                                         extsz = alen / mp->m_sb.sb_rextsize;
4662
4663                                 /*
4664                                  * Make a transaction-less quota reservation for
4665                                  * delayed allocation blocks. This number gets
4666                                  * adjusted later.  If the reservation fails before any
4667                                  * mappings have been set up in this loop, return the error.
4668                                  */
4669                                 error = xfs_trans_reserve_quota_nblks(
4670                                                 NULL, ip, (long)alen, 0,
4671                                                 rt ? XFS_QMOPT_RES_RTBLKS :
4672                                                      XFS_QMOPT_RES_REGBLKS);
4673                                 if (error) {
4674                                         if (n == 0) {
4675                                                 *nmap = 0;
4676                                                 ASSERT(cur == NULL);
4677                                                 return error;
4678                                         }
4679                                         break;
4680                                 }
4681
4682                                 /*
4683                                  * Update the in-core superblock separately for alen and
4684                                  * indlen, since they may come out of different counters.
4685                                  */
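                                     /*
                                      * indlen is the worst-case number
                                      * of bmap btree blocks needed when
                                      * this delayed extent is eventually
                                      * mapped to real blocks; reserve
                                      * space for them as well.
                                      */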
4686                                 indlen = (xfs_extlen_t)
4687                                         xfs_bmap_worst_indlen(ip, alen);
4688                                 ASSERT(indlen > 0);
4689
4690                                 if (rt) {
4691                                         error = xfs_mod_incore_sb(mp,
4692                                                         XFS_SBS_FREXTENTS,
4693                                                         -((int64_t)extsz), (flags &
4694                                                         XFS_BMAPI_RSVBLOCKS));
4695                                 } else {
4696                                         error = xfs_mod_incore_sb(mp,
4697                                                         XFS_SBS_FDBLOCKS,
4698                                                         -((int64_t)alen), (flags &
4699                                                         XFS_BMAPI_RSVBLOCKS));
4700                                 }
4701                                 if (!error) {
4702                                         error = xfs_mod_incore_sb(mp,
4703                                                         XFS_SBS_FDBLOCKS,
4704                                                         -((int64_t)indlen), (flags &
4705                                                         XFS_BMAPI_RSVBLOCKS));
4706                                         if (error && rt)
4707                                                 xfs_mod_incore_sb(mp,
4708                                                         XFS_SBS_FREXTENTS,
4709                                                         (int64_t)extsz, (flags &
4710                                                         XFS_BMAPI_RSVBLOCKS));
4711                                         else if (error)
4712                                                 xfs_mod_incore_sb(mp,
4713                                                         XFS_SBS_FDBLOCKS,
4714                                                         (int64_t)alen, (flags &
4715                                                         XFS_BMAPI_RSVBLOCKS));
4716                                 }
4717
4718                                 if (error) {
4719                                         if (XFS_IS_QUOTA_ON(mp))
4720                                                 /* unreserve the blocks now */
4721                                                 (void)
4722                                                 xfs_trans_unreserve_quota_nblks(
4723                                                         NULL, ip,
4724                                                         (long)alen, 0, rt ?
4725                                                         XFS_QMOPT_RES_RTBLKS :
4726                                                         XFS_QMOPT_RES_REGBLKS);
4727                                         break;
4728                                 }
4729
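                                     /*
                                      * Record the delayed allocation:
                                      * count the data blocks in
                                      * i_delayed_blks and encode the
                                      * indirect block reservation in
                                      * the extent's special startblock.
                                      */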
4730                                 ip->i_delayed_blks += alen;
4731                                 abno = nullstartblock(indlen);
4732                         } else {
4733                                 /*
4734                                  * If this is the first allocation in this call, fill
4735                                  * in the once-only bma fields.
4736                                  */
4737                                 if (bma.ip == NULL) {
4738                                         bma.tp = tp;
4739                                         bma.ip = ip;
4740                                         bma.prevp = &prev;
4741                                         bma.gotp = &got;
4742                                         bma.total = total;
4743                                         bma.userdata = 0;
4744                                 }
4745                                 /* Indicate if this is the first user data
4746                                  * in the file, or just any user data.
4747                                  */
4748                                 if (!(flags & XFS_BMAPI_METADATA)) {
4749                                         bma.userdata = (aoff == 0) ?
4750                                                 XFS_ALLOC_INITIAL_USER_DATA :
4751                                                 XFS_ALLOC_USERDATA;
4752                                 }
4753                                 /*
4754                                  * Fill in changeable bma fields.
4755                                  */
4756                                 bma.eof = eof;
4757                                 bma.firstblock = *firstblock;
4758                                 bma.alen = alen;
4759                                 bma.off = aoff;
4760                                 bma.conv = !!(flags & XFS_BMAPI_CONVERT);
4761                                 bma.wasdel = wasdelay;
4762                                 bma.minlen = minlen;
4763                                 bma.low = flist->xbf_low;
4764                                 bma.minleft = minleft;
4765                                 /*
4766                                  * Only want to do the alignment at the
4767                                  * eof if it is userdata and allocation length
4768                                  * is larger than a stripe unit.
4769                                  */
4770                                 if (mp->m_dalign && alen >= mp->m_dalign &&
4771                                     (!(flags & XFS_BMAPI_METADATA)) &&
4772                                     (whichfork == XFS_DATA_FORK)) {
4773                                         if ((error = xfs_bmap_isaeof(ip, aoff,
4774                                                         whichfork, &bma.aeof)))
4775                                                 goto error0;
4776                                 } else
4777                                         bma.aeof = 0;
4778                                 /*
4779                                  * Call allocator.
4780                                  */
4781                                 if ((error = xfs_bmap_alloc(&bma)))
4782                                         goto error0;
4783                                 /*
4784                                  * Copy out result fields.
4785                                  */
4786                                 abno = bma.rval;
4787                                 if ((flist->xbf_low = bma.low))
4788                                         minleft = 0;
4789                                 alen = bma.alen;
4790                                 aoff = bma.off;
4791                                 ASSERT(*firstblock == NULLFSBLOCK ||
4792                                        XFS_FSB_TO_AGNO(mp, *firstblock) ==
4793                                        XFS_FSB_TO_AGNO(mp, bma.firstblock) ||
4794                                        (flist->xbf_low &&
4795                                         XFS_FSB_TO_AGNO(mp, *firstblock) <
4796                                         XFS_FSB_TO_AGNO(mp, bma.firstblock)));
4797                                 *firstblock = bma.firstblock;
4798                                 if (cur)
4799                                         cur->bc_private.b.firstblock =
4800                                                 *firstblock;
4801                                 if (abno == NULLFSBLOCK)
4802                                         break;
4803                                 if ((ifp->if_flags & XFS_IFBROOT) && !cur) {
4804                                         cur = xfs_bmbt_init_cursor(mp, tp,
4805                                                 ip, whichfork);
4806                                         cur->bc_private.b.firstblock =
4807                                                 *firstblock;
4808                                         cur->bc_private.b.flist = flist;
4809                                 }
4810                                 /*
4811                                  * Bump the number of extents we've allocated
4812                                  * in this call.
4813                                  */
4814                                 nallocs++;
4815                         }
4816                         if (cur)
4817                                 cur->bc_private.b.flags =
4818                                         wasdelay ? XFS_BTCUR_BPRV_WASDEL : 0;
4819                         got.br_startoff = aoff;
4820                         got.br_startblock = abno;
4821                         got.br_blockcount = alen;
4822                         got.br_state = XFS_EXT_NORM;    /* assume normal */
4823                         /*
4824                          * Determine the extent state, based on the filesystem's
4825                          * unwritten extent support.  A wasdelay extent has already
4826                          * been initialized, so it shouldn't be flagged as unwritten.
4827                          */
4828                         if (wr && xfs_sb_version_hasextflgbit(&mp->m_sb)) {
4829                                 if (!wasdelay && (flags & XFS_BMAPI_PREALLOC))
4830                                         got.br_state = XFS_EXT_UNWRITTEN;
4831                         }
4832                         error = xfs_bmap_add_extent(ip, lastx, &cur, &got,
4833                                 firstblock, flist, &tmp_logflags, delta,
4834                                 whichfork, (flags & XFS_BMAPI_RSVBLOCKS));
4835                         logflags |= tmp_logflags;
4836                         if (error)
4837                                 goto error0;
4838                         lastx = ifp->if_lastex;
4839                         ep = xfs_iext_get_ext(ifp, lastx);
4840                         nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
4841                         xfs_bmbt_get_all(ep, &got);
4842                         ASSERT(got.br_startoff <= aoff);
4843                         ASSERT(got.br_startoff + got.br_blockcount >=
4844                                 aoff + alen);
4845 #ifdef DEBUG
4846                         if (flags & XFS_BMAPI_DELAY) {
4847                                 ASSERT(isnullstartblock(got.br_startblock));
4848                                 ASSERT(startblockval(got.br_startblock) > 0);
4849                         }
4850                         ASSERT(got.br_state == XFS_EXT_NORM ||
4851                                got.br_state == XFS_EXT_UNWRITTEN);
4852 #endif
4853                         /*
4854                          * Fall down into the found allocated space case.
4855                          */
4856                 } else if (inhole) {
4857                         /*
4858                          * Reading in a hole.
4859                          */
4860                         mval->br_startoff = bno;
4861                         mval->br_startblock = HOLESTARTBLOCK;
4862                         mval->br_blockcount =
4863                                 XFS_FILBLKS_MIN(len, got.br_startoff - bno);
4864                         mval->br_state = XFS_EXT_NORM;
4865                         bno += mval->br_blockcount;
4866                         len -= mval->br_blockcount;
4867                         mval++;
4868                         n++;
4869                         continue;
4870                 }
4871                 /*
4872                  * Then deal with the allocated space we found.
4873                  */
4874                 ASSERT(ep != NULL);
4875                 if (!(flags & XFS_BMAPI_ENTIRE) &&
4876                     (got.br_startoff + got.br_blockcount > obno)) {
4877                         if (obno > bno)
4878                                 bno = obno;
4879                         ASSERT((bno >= obno) || (n == 0));
4880                         ASSERT(bno < end);
4881                         mval->br_startoff = bno;
4882                         if (isnullstartblock(got.br_startblock)) {
4883                                 ASSERT(!wr || (flags & XFS_BMAPI_DELAY));
4884                                 mval->br_startblock = DELAYSTARTBLOCK;
4885                         } else
4886                                 mval->br_startblock =
4887                                         got.br_startblock +
4888                                         (bno - got.br_startoff);
4889                         /*
4890                          * Return the minimum of what we got and what was asked
4891                          * for as the length.  We can use the len variable here
4892                          * because it is modified below, and we may already have
4893                          * been through this point on an earlier pass if the
4894                          * first part of the allocation didn't overlap what was
4895                          * asked for.
4896                          */
4897                         mval->br_blockcount =
4898                                 XFS_FILBLKS_MIN(end - bno, got.br_blockcount -
4899                                         (bno - got.br_startoff));
4900                         mval->br_state = got.br_state;
4901                         ASSERT(mval->br_blockcount <= len);
4902                 } else {
4903                         *mval = got;
4904                         if (isnullstartblock(mval->br_startblock)) {
4905                                 ASSERT(!wr || (flags & XFS_BMAPI_DELAY));
4906                                 mval->br_startblock = DELAYSTARTBLOCK;
4907                         }
4908                 }
4909
4910                 /*
4911                  * Check if writing previously allocated but
4912                  * unwritten extents.
4913                  */
4914                 if (wr && mval->br_state == XFS_EXT_UNWRITTEN &&
4915                     ((flags & (XFS_BMAPI_PREALLOC|XFS_BMAPI_DELAY)) == 0)) {
4916                         /*
4917                          * Modify (by adding) the state flag, if writing.
4918                          */
4919                         ASSERT(mval->br_blockcount <= len);
4920                         if ((ifp->if_flags & XFS_IFBROOT) && !cur) {
4921                                 cur = xfs_bmbt_init_cursor(mp,
4922                                         tp, ip, whichfork);
4923                                 cur->bc_private.b.firstblock =
4924                                         *firstblock;
4925                                 cur->bc_private.b.flist = flist;
4926                         }
4927                         mval->br_state = XFS_EXT_NORM;
4928                         error = xfs_bmap_add_extent(ip, lastx, &cur, mval,
4929                                 firstblock, flist, &tmp_logflags, delta,
4930                                 whichfork, (flags & XFS_BMAPI_RSVBLOCKS));
4931                         logflags |= tmp_logflags;
4932                         if (error)
4933                                 goto error0;
4934                         lastx = ifp->if_lastex;
4935                         ep = xfs_iext_get_ext(ifp, lastx);
4936                         nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
4937                         xfs_bmbt_get_all(ep, &got);
4938                         /*
4939                          * We may have combined previously unwritten
4940                          * space with written space, so generate
4941                          * another request.
4942                          */
4943                         if (mval->br_blockcount < len)
4944                                 continue;
4945                 }
4946
4947                 ASSERT((flags & XFS_BMAPI_ENTIRE) ||
4948                        ((mval->br_startoff + mval->br_blockcount) <= end));
4949                 ASSERT((flags & XFS_BMAPI_ENTIRE) ||
4950                        (mval->br_blockcount <= len) ||
4951                        (mval->br_startoff < obno));
4952                 bno = mval->br_startoff + mval->br_blockcount;
4953                 len = end - bno;
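             /*
              * Merge this mapping into the previous output entry when the
              * two overlap or describe space that is contiguous on disk
              * (or both delayed) with a compatible state, so the caller
              * sees as few map entries as possible.
              */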
4954                 if (n > 0 && mval->br_startoff == mval[-1].br_startoff) {
4955                         ASSERT(mval->br_startblock == mval[-1].br_startblock);
4956                         ASSERT(mval->br_blockcount > mval[-1].br_blockcount);
4957                         ASSERT(mval->br_state == mval[-1].br_state);
4958                         mval[-1].br_blockcount = mval->br_blockcount;
4959                         mval[-1].br_state = mval->br_state;
4960                 } else if (n > 0 && mval->br_startblock != DELAYSTARTBLOCK &&
4961                            mval[-1].br_startblock != DELAYSTARTBLOCK &&
4962                            mval[-1].br_startblock != HOLESTARTBLOCK &&
4963                            mval->br_startblock ==
4964                            mval[-1].br_startblock + mval[-1].br_blockcount &&
4965                            ((flags & XFS_BMAPI_IGSTATE) ||
4966                                 mval[-1].br_state == mval->br_state)) {
4967                         ASSERT(mval->br_startoff ==
4968                                mval[-1].br_startoff + mval[-1].br_blockcount);
4969                         mval[-1].br_blockcount += mval->br_blockcount;
4970                 } else if (n > 0 &&
4971                            mval->br_startblock == DELAYSTARTBLOCK &&
4972                            mval[-1].br_startblock == DELAYSTARTBLOCK &&
4973                            mval->br_startoff ==
4974                            mval[-1].br_startoff + mval[-1].br_blockcount) {
4975                         mval[-1].br_blockcount += mval->br_blockcount;
4976                         mval[-1].br_state = mval->br_state;
4977                 } else if (!((n == 0) &&
4978                              ((mval->br_startoff + mval->br_blockcount) <=
4979                               obno))) {
4980                         mval++;
4981                         n++;
4982                 }
4983                 /*
4984                  * If we're done, stop now.  Stop when we've allocated
4985                  * XFS_BMAP_MAX_NMAP extents no matter what.  Otherwise
4986                  * the transaction may get too big.
4987                  */
4988                 if (bno >= end || n >= *nmap || nallocs >= *nmap)
4989                         break;
4990                 /*
4991                  * Else go on to the next record.
4992                  */
4993                 ep = xfs_iext_get_ext(ifp, ++lastx);
4994                 prev = got;
4995                 if (lastx >= nextents)
4996                         eof = 1;
4997                 else
4998                         xfs_bmbt_get_all(ep, &got);
4999         }
5000         ifp->if_lastex = lastx;
5001         *nmap = n;
5002         /*
5003          * Transform from btree to extents, give it cur.
5004          */
5005         if (tp && XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE &&
5006             XFS_IFORK_NEXTENTS(ip, whichfork) <= ifp->if_ext_max) {
5007                 ASSERT(wr && cur);
5008                 error = xfs_bmap_btree_to_extents(tp, ip, cur,
5009                         &tmp_logflags, whichfork);
5010                 logflags |= tmp_logflags;
5011                 if (error)
5012                         goto error0;
5013         }
5014         ASSERT(ifp->if_ext_max ==
5015                XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t));
5016         ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE ||
5017                XFS_IFORK_NEXTENTS(ip, whichfork) > ifp->if_ext_max);
5018         error = 0;
5019         if (delta && delta->xed_startoff != NULLFILEOFF) {
5020                 /* A change was actually made.
5021                  * Note that delta->xed_blockcount is an offset at this
5022                  * point and needs to be converted to a block count.
5023                  */
5024                 ASSERT(delta->xed_blockcount > delta->xed_startoff);
5025                 delta->xed_blockcount -= delta->xed_startoff;
5026         }
5027 error0:
5028         /*
5029          * Log everything.  Do this after conversion, there's no point in
5030          * logging the extent records if we've converted to btree format.
5031          */
5032         if ((logflags & xfs_ilog_fext(whichfork)) &&
5033             XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
5034                 logflags &= ~xfs_ilog_fext(whichfork);
5035         else if ((logflags & xfs_ilog_fbroot(whichfork)) &&
5036                  XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)
5037                 logflags &= ~xfs_ilog_fbroot(whichfork);
5038         /*
5039          * Log whatever the flags say, even if error.  Otherwise we might miss
5040          * detecting a case where the data is changed, there's an error,
5041          * and it's not logged so we don't shutdown when we should.
5042          */
5043         if (logflags) {
5044                 ASSERT(tp && wr);
5045                 xfs_trans_log_inode(tp, ip, logflags);
5046         }
5047         if (cur) {
5048                 if (!error) {
5049                         ASSERT(*firstblock == NULLFSBLOCK ||
5050                                XFS_FSB_TO_AGNO(mp, *firstblock) ==
5051                                XFS_FSB_TO_AGNO(mp,
5052                                        cur->bc_private.b.firstblock) ||
5053                                (flist->xbf_low &&
5054                                 XFS_FSB_TO_AGNO(mp, *firstblock) <
5055                                 XFS_FSB_TO_AGNO(mp,
5056                                         cur->bc_private.b.firstblock)));
5057                         *firstblock = cur->bc_private.b.firstblock;
5058                 }
5059                 xfs_btree_del_cursor(cur,
5060                         error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
5061         }
5062         if (!error)
5063                 xfs_bmap_validate_ret(orig_bno, orig_len, orig_flags, orig_mval,
5064                         orig_nmap, *nmap);
5065         return error;
5066 }
5067
5068 /*
5069  * Map file blocks to filesystem blocks, simple version.
5070  * One block (extent) only, read-only.
5071  * For flags, only the XFS_BMAPI_ATTRFORK flag is examined.
5072  * For the other flag values, the effect is as if XFS_BMAPI_METADATA
5073  * was set and all the others were clear.
5074  */
5075 int                                             /* error */
5076 xfs_bmapi_single(
5077         xfs_trans_t     *tp,            /* transaction pointer */
5078         xfs_inode_t     *ip,            /* incore inode */
5079         int             whichfork,      /* data or attr fork */
5080         xfs_fsblock_t   *fsb,           /* output: mapped block */
5081         xfs_fileoff_t   bno)            /* starting file offs. mapped */
5082 {
5083         int             eof;            /* we've hit the end of extents */
5084         int             error;          /* error return */
5085         xfs_bmbt_irec_t got;            /* current file extent record */
5086         xfs_ifork_t     *ifp;           /* inode fork pointer */
5087         xfs_extnum_t    lastx;          /* last useful extent number */
5088         xfs_bmbt_irec_t prev;           /* previous file extent record */
5089
5090         ifp = XFS_IFORK_PTR(ip, whichfork);
5091         if (unlikely(
5092             XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE &&
5093             XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)) {
5094                XFS_ERROR_REPORT("xfs_bmapi_single", XFS_ERRLEVEL_LOW,
5095                                 ip->i_mount);
5096                return XFS_ERROR(EFSCORRUPTED);
5097         }
5098         if (XFS_FORCED_SHUTDOWN(ip->i_mount))
5099                 return XFS_ERROR(EIO);
5100         XFS_STATS_INC(xs_blk_mapr);
5101         if (!(ifp->if_flags & XFS_IFEXTENTS) &&
5102             (error = xfs_iread_extents(tp, ip, whichfork)))
5103                 return error;
5104         (void)xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got,
5105                 &prev);
5106         /*
5107          * Reading past eof, act as though there's a hole
5108          * up to end.
5109          */
5110         if (eof || got.br_startoff > bno) {
5111                 *fsb = NULLFSBLOCK;
5112                 return 0;
5113         }
5114         ASSERT(!isnullstartblock(got.br_startblock));
5115         ASSERT(bno < got.br_startoff + got.br_blockcount);
5116         *fsb = got.br_startblock + (bno - got.br_startoff);
5117         ifp->if_lastex = lastx;
5118         return 0;
5119 }
5120
5121 /*
5122  * Unmap (remove) blocks from a file.
5123  * If nexts is nonzero then the number of extents to remove is limited to
5124  * that value.  *done is set once the whole block range has been unmapped;
5125  * it is left clear if the extent count limit stopped the operation early.
5126  */
5127 int                                             /* error */
5128 xfs_bunmapi(
5129         xfs_trans_t             *tp,            /* transaction pointer */
5130         struct xfs_inode        *ip,            /* incore inode */
5131         xfs_fileoff_t           bno,            /* starting offset to unmap */
5132         xfs_filblks_t           len,            /* length to unmap in file */
5133         int                     flags,          /* misc flags */
5134         xfs_extnum_t            nexts,          /* number of extents max */
5135         xfs_fsblock_t           *firstblock,    /* first allocated block
5136                                                    controls a.g. for allocs */
5137         xfs_bmap_free_t         *flist,         /* i/o: list extents to free */
5138         xfs_extdelta_t          *delta,         /* o: change made to incore
5139                                                    extents */
5140         int                     *done)          /* o: set when the whole range is unmapped */
5141 {
5142         xfs_btree_cur_t         *cur;           /* bmap btree cursor */
5143         xfs_bmbt_irec_t         del;            /* extent being deleted */
5144         int                     eof;            /* is deleting at eof */
5145         xfs_bmbt_rec_host_t     *ep;            /* extent record pointer */
5146         int                     error;          /* error return value */
5147         xfs_extnum_t            extno;          /* extent number in list */
5148         xfs_bmbt_irec_t         got;            /* current extent record */
5149         xfs_ifork_t             *ifp;           /* inode fork pointer */
5150         int                     isrt;           /* freeing in rt area */
5151         xfs_extnum_t            lastx;          /* last extent index used */
5152         int                     logflags;       /* transaction logging flags */
5153         xfs_extlen_t            mod;            /* rt extent offset */
5154         xfs_mount_t             *mp;            /* mount structure */
5155         xfs_extnum_t            nextents;       /* number of file extents */
5156         xfs_bmbt_irec_t         prev;           /* previous extent record */
5157         xfs_fileoff_t           start;          /* first file offset deleted */
5158         int                     tmp_logflags;   /* partial logging flags */
5159         int                     wasdel;         /* was a delayed alloc extent */
5160         int                     whichfork;      /* data or attribute fork */
5161         int                     rsvd;           /* OK to allocate reserved blocks */
5162         xfs_fsblock_t           sum;
5163
5164         trace_xfs_bunmap(ip, bno, len, flags, _RET_IP_);
5165
5166         whichfork = (flags & XFS_BMAPI_ATTRFORK) ?
5167                 XFS_ATTR_FORK : XFS_DATA_FORK;
5168         ifp = XFS_IFORK_PTR(ip, whichfork);
5169         if (unlikely(
5170             XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
5171             XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) {
5172                 XFS_ERROR_REPORT("xfs_bunmapi", XFS_ERRLEVEL_LOW,
5173                                  ip->i_mount);
5174                 return XFS_ERROR(EFSCORRUPTED);
5175         }
5176         mp = ip->i_mount;
5177         if (XFS_FORCED_SHUTDOWN(mp))
5178                 return XFS_ERROR(EIO);
5179         rsvd = (flags & XFS_BMAPI_RSVBLOCKS) != 0;
5180         ASSERT(len > 0);
5181         ASSERT(nexts >= 0);
5182         ASSERT(ifp->if_ext_max ==
5183                XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t));
5184         if (!(ifp->if_flags & XFS_IFEXTENTS) &&
5185             (error = xfs_iread_extents(tp, ip, whichfork)))
5186                 return error;
5187         nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
5188         if (nextents == 0) {
5189                 *done = 1;
5190                 return 0;
5191         }
5192         XFS_STATS_INC(xs_blk_unmap);
5193         isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
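             /*
              * Work backwards: start at the last block of the range and
              * walk toward the start offset, deleting as we go.
              */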
5194         start = bno;
5195         bno = start + len - 1;
5196         ep = xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got,
5197                 &prev);
5198         if (delta) {
5199                 delta->xed_startoff = NULLFILEOFF;
5200                 delta->xed_blockcount = 0;
5201         }
5202         /*
5203          * Check to see if the given block number is past the end of the
5204          * file, back up to the last block if so...
5205          */
5206         if (eof) {
5207                 ep = xfs_iext_get_ext(ifp, --lastx);
5208                 xfs_bmbt_get_all(ep, &got);
5209                 bno = got.br_startoff + got.br_blockcount - 1;
5210         }
5211         logflags = 0;
5212         if (ifp->if_flags & XFS_IFBROOT) {
5213                 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
5214                 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
5215                 cur->bc_private.b.firstblock = *firstblock;
5216                 cur->bc_private.b.flist = flist;
5217                 cur->bc_private.b.flags = 0;
5218         } else
5219                 cur = NULL;
5220         extno = 0;
5221         while (bno != (xfs_fileoff_t)-1 && bno >= start && lastx >= 0 &&
5222                (nexts == 0 || extno < nexts)) {
5223                 /*
5224                  * Is the found extent after a hole in which bno lives?
5225                  * Just back up to the previous extent, if so.
5226                  */
5227                 if (got.br_startoff > bno) {
5228                         if (--lastx < 0)
5229                                 break;
5230                         ep = xfs_iext_get_ext(ifp, lastx);
5231                         xfs_bmbt_get_all(ep, &got);
5232                 }
5233                 /*
5234                  * Is the last block of this extent before the range
5235                  * we're supposed to delete?  If so, we're done.
5236                  */
5237                 bno = XFS_FILEOFF_MIN(bno,
5238                         got.br_startoff + got.br_blockcount - 1);
5239                 if (bno < start)
5240                         break;
5241                 /*
5242                  * Then deal with the (possibly delayed) allocated space
5243                  * we found.
5244                  */
5245                 ASSERT(ep != NULL);
5246                 del = got;
5247                 wasdel = isnullstartblock(del.br_startblock);
5248                 if (got.br_startoff < start) {
5249                         del.br_startoff = start;
5250                         del.br_blockcount -= start - got.br_startoff;
5251                         if (!wasdel)
5252                                 del.br_startblock += start - got.br_startoff;
5253                 }
5254                 if (del.br_startoff + del.br_blockcount > bno + 1)
5255                         del.br_blockcount = bno + 1 - del.br_startoff;
5256                 sum = del.br_startblock + del.br_blockcount;
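                     /*
                      * sum is the first block past the piece being deleted.
                      * On the realtime device both ends of the deletion
                      * must line up with realtime extent boundaries before
                      * whole rt extents can actually be freed.
                      */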
5257                 if (isrt &&
5258                     (mod = do_mod(sum, mp->m_sb.sb_rextsize))) {
5259                         /*
5260                          * Realtime extent not lined up at the end.
5261                          * The extent could have been split into written
5262                          * and unwritten pieces, or we could just be
5263                          * unmapping part of it.  But we can't really
5264                          * get rid of part of a realtime extent.
5265                          */
5266                         if (del.br_state == XFS_EXT_UNWRITTEN ||
5267                             !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
5268                                 /*
5269                                  * This piece is unwritten, or we're not
5270                                  * using unwritten extents.  Skip over it.
5271                                  */
5272                                 ASSERT(bno >= mod);
5273                                 bno -= mod > del.br_blockcount ?
5274                                         del.br_blockcount : mod;
5275                                 if (bno < got.br_startoff) {
5276                                         if (--lastx >= 0)
5277                                                 xfs_bmbt_get_all(xfs_iext_get_ext(
5278                                                         ifp, lastx), &got);
5279                                 }
5280                                 continue;
5281                         }
5282                         /*
5283                          * It's written, turn it unwritten.
5284                          * This is better than zeroing it.
5285                          */
5286                         ASSERT(del.br_state == XFS_EXT_NORM);
5287                         ASSERT(xfs_trans_get_block_res(tp) > 0);
5288                         /*
5289                          * If this spans a realtime extent boundary,
5290                          * chop it back to the start of the one we end at.
5291                          */
5292                         if (del.br_blockcount > mod) {
5293                                 del.br_startoff += del.br_blockcount - mod;
5294                                 del.br_startblock += del.br_blockcount - mod;
5295                                 del.br_blockcount = mod;
5296                         }
5297                         del.br_state = XFS_EXT_UNWRITTEN;
5298                         error = xfs_bmap_add_extent(ip, lastx, &cur, &del,
5299                                 firstblock, flist, &logflags, delta,
5300                                 XFS_DATA_FORK, 0);
5301                         if (error)
5302                                 goto error0;
5303                         goto nodelete;
5304                 }
5305                 if (isrt && (mod = do_mod(del.br_startblock, mp->m_sb.sb_rextsize))) {
5306                         /*
5307                          * Realtime extent is lined up at the end but not
5308                          * at the front.  We'll get rid of full extents if
5309                          * we can.
5310                          */
5311                         mod = mp->m_sb.sb_rextsize - mod;
5312                         if (del.br_blockcount > mod) {
5313                                 del.br_blockcount -= mod;
5314                                 del.br_startoff += mod;
5315                                 del.br_startblock += mod;
5316                         } else if ((del.br_startoff == start &&
5317                                     (del.br_state == XFS_EXT_UNWRITTEN ||
5318                                      xfs_trans_get_block_res(tp) == 0)) ||
5319                                    !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
5320                                 /*
5321                                  * Can't make it unwritten.  There isn't
5322                                  * a full extent here so just skip it.
5323                                  */
5324                                 ASSERT(bno >= del.br_blockcount);
5325                                 bno -= del.br_blockcount;
5326                                 if (bno < got.br_startoff) {
5327                                         if (--lastx >= 0)
5328                                                 xfs_bmbt_get_all(xfs_iext_get_ext(
                                                             ifp, lastx), &got);
5329                                 }
5330                                 continue;
5331                         } else if (del.br_state == XFS_EXT_UNWRITTEN) {
5332                                 /*
5333                                  * This one is already unwritten.
5334                                  * It must have a written left neighbor.
5335                                  * Unwrite the killed part of that one and
5336                                  * try again.
5337                                  */
5338                                 ASSERT(lastx > 0);
5339                                 xfs_bmbt_get_all(xfs_iext_get_ext(ifp,
5340                                                 lastx - 1), &prev);
5341                                 ASSERT(prev.br_state == XFS_EXT_NORM);
5342                                 ASSERT(!isnullstartblock(prev.br_startblock));
5343                                 ASSERT(del.br_startblock ==
5344                                        prev.br_startblock + prev.br_blockcount);
5345                                 if (prev.br_startoff < start) {
5346                                         mod = start - prev.br_startoff;
5347                                         prev.br_blockcount -= mod;
5348                                         prev.br_startblock += mod;
5349                                         prev.br_startoff = start;
5350                                 }
5351                                 prev.br_state = XFS_EXT_UNWRITTEN;
5352                                 error = xfs_bmap_add_extent(ip, lastx - 1, &cur,
5353                                         &prev, firstblock, flist, &logflags,
5354                                         delta, XFS_DATA_FORK, 0);
5355                                 if (error)
5356                                         goto error0;
5357                                 goto nodelete;
5358                         } else {
5359                                 ASSERT(del.br_state == XFS_EXT_NORM);
5360                                 del.br_state = XFS_EXT_UNWRITTEN;
5361                                 error = xfs_bmap_add_extent(ip, lastx, &cur,
5362                                         &del, firstblock, flist, &logflags,
5363                                         delta, XFS_DATA_FORK, 0);
5364                                 if (error)
5365                                         goto error0;
5366                                 goto nodelete;
5367                         }
5368                 }
5369                 if (wasdel) {
5370                         ASSERT(startblockval(del.br_startblock) > 0);
5371                         /* Update realtime/data freespace, unreserve quota */
5372                         if (isrt) {
5373                                 xfs_filblks_t rtexts;
5374
5375                                 rtexts = XFS_FSB_TO_B(mp, del.br_blockcount);
5376                                 do_div(rtexts, mp->m_sb.sb_rextsize);
5377                                 xfs_mod_incore_sb(mp, XFS_SBS_FREXTENTS,
5378                                                 (int64_t)rtexts, rsvd);
5379                                 (void)xfs_trans_reserve_quota_nblks(NULL,
5380                                         ip, -((long)del.br_blockcount), 0,
5381                                         XFS_QMOPT_RES_RTBLKS);
5382                         } else {
5383                                 xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS,
5384                                                 (int64_t)del.br_blockcount, rsvd);
5385                                 (void)xfs_trans_reserve_quota_nblks(NULL,
5386                                         ip, -((long)del.br_blockcount), 0,
5387                                         XFS_QMOPT_RES_REGBLKS);
5388                         }
5389                         ip->i_delayed_blks -= del.br_blockcount;
5390                         if (cur)
5391                                 cur->bc_private.b.flags |=
5392                                         XFS_BTCUR_BPRV_WASDEL;
5393                 } else if (cur)
5394                         cur->bc_private.b.flags &= ~XFS_BTCUR_BPRV_WASDEL;
5395                 /*
5396                  * If it's the case where the directory code is running
5397                  * with no block reservation, and the deleted block is in
5398                  * the middle of its extent, and the resulting insert
5399                  * of an extent would cause transformation to btree format,
5400                  * then reject it.  The calling code will then swap
5401                  * blocks around instead.
5402                  * We have to do this now, rather than waiting for the
5403                  * conversion to btree format, since the transaction
5404                  * will be dirty.
5405                  */
5406                 if (!wasdel && xfs_trans_get_block_res(tp) == 0 &&
5407                     XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
5408                     XFS_IFORK_NEXTENTS(ip, whichfork) >= ifp->if_ext_max &&
5409                     del.br_startoff > got.br_startoff &&
5410                     del.br_startoff + del.br_blockcount <
5411                     got.br_startoff + got.br_blockcount) {
5412                         error = XFS_ERROR(ENOSPC);
5413                         goto error0;
5414                 }
5415                 error = xfs_bmap_del_extent(ip, tp, lastx, flist, cur, &del,
5416                                 &tmp_logflags, delta, whichfork, rsvd);
5417                 logflags |= tmp_logflags;
5418                 if (error)
5419                         goto error0;
5420                 bno = del.br_startoff - 1;
5421 nodelete:
5422                 lastx = ifp->if_lastex;
5423                 /*
5424                  * If not done go on to the next (previous) record.
5425                  * Reset ep in case the extents array was re-alloced.
5426                  */
5427                 ep = xfs_iext_get_ext(ifp, lastx);
5428                 if (bno != (xfs_fileoff_t)-1 && bno >= start) {
5429                         if (lastx >= XFS_IFORK_NEXTENTS(ip, whichfork) ||
5430                             xfs_bmbt_get_startoff(ep) > bno) {
5431                                 if (--lastx >= 0)
5432                                         ep = xfs_iext_get_ext(ifp, lastx);
5433                         }
5434                         if (lastx >= 0)
5435                                 xfs_bmbt_get_all(ep, &got);
5436                         extno++;
5437                 }
5438         }
5439         ifp->if_lastex = lastx;
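             /*
              * We are done once bno has moved in front of the start of
              * the range (or wrapped past offset zero), or there are no
              * extents left to look at.
              */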
5440         *done = bno == (xfs_fileoff_t)-1 || bno < start || lastx < 0;
5441         ASSERT(ifp->if_ext_max ==
5442                XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t));
5443         /*
5444          * Convert to a btree if necessary.
5445          */
5446         if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
5447             XFS_IFORK_NEXTENTS(ip, whichfork) > ifp->if_ext_max) {
5448                 ASSERT(cur == NULL);
5449                 error = xfs_bmap_extents_to_btree(tp, ip, firstblock, flist,
5450                         &cur, 0, &tmp_logflags, whichfork);
5451                 logflags |= tmp_logflags;
5452                 if (error)
5453                         goto error0;
5454         }
5455         /*
5456          * transform from btree to extents, give it cur
5457          */
5458         else if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE &&
5459                  XFS_IFORK_NEXTENTS(ip, whichfork) <= ifp->if_ext_max) {
5460                 ASSERT(cur != NULL);
5461                 error = xfs_bmap_btree_to_extents(tp, ip, cur, &tmp_logflags,
5462                         whichfork);
5463                 logflags |= tmp_logflags;
5464                 if (error)
5465                         goto error0;
5466         }
5467         /*
5468          * transform from extents to local?
5469          */
5470         ASSERT(ifp->if_ext_max ==
5471                XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t));
5472         error = 0;
5473         if (delta && delta->xed_startoff != NULLFILEOFF) {
5474                 /* A change was actually made.
5475                  * Note that delta->xed_blockcount is an offset at this
5476                  * point and needs to be converted to a block count.
5477                  */
5478                 ASSERT(delta->xed_blockcount > delta->xed_startoff);
5479                 delta->xed_blockcount -= delta->xed_startoff;
5480         }
5481 error0:
5482         /*
5483          * Log everything.  Do this after conversion, there's no point in
5484          * logging the extent records if we've converted to btree format.
5485          */
5486         if ((logflags & xfs_ilog_fext(whichfork)) &&
5487             XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
5488                 logflags &= ~xfs_ilog_fext(whichfork);
5489         else if ((logflags & xfs_ilog_fbroot(whichfork)) &&
5490                  XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)
5491                 logflags &= ~xfs_ilog_fbroot(whichfork);
5492         /*
5493          * Log inode even in the error case, if the transaction
5494          * is dirty we'll need to shut down the filesystem.
5495          */
5496         if (logflags)
5497                 xfs_trans_log_inode(tp, ip, logflags);
5498         if (cur) {
5499                 if (!error) {
5500                         *firstblock = cur->bc_private.b.firstblock;
5501                         cur->bc_private.b.allocated = 0;
5502                 }
5503                 xfs_btree_del_cursor(cur,
5504                         error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
5505         }
5506         return error;
5507 }
5508
5509 /*
5510  * returns 1 for success, 0 if we failed to map the extent.
5511  */
5512 STATIC int
5513 xfs_getbmapx_fix_eof_hole(
5514         xfs_inode_t             *ip,            /* xfs incore inode pointer */
5515         struct getbmapx         *out,           /* output structure */
5516         int                     prealloced,     /* this is a file with
5517                                                  * preallocated data space */
5518         __int64_t               end,            /* last block requested */
5519         xfs_fsblock_t           startblock)
5520 {
5521         __int64_t               fixlen;
5522         xfs_mount_t             *mp;            /* file system mount point */
5523         xfs_ifork_t             *ifp;           /* inode fork pointer */
5524         xfs_extnum_t            lastx;          /* last extent pointer */
5525         xfs_fileoff_t           fileblock;
5526
5527         if (startblock == HOLESTARTBLOCK) {
5528                 mp = ip->i_mount;
5529                 out->bmv_block = -1;
5530                 fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, ip->i_size));
5531                 fixlen -= out->bmv_offset;
5532                 if (prealloced && out->bmv_offset + out->bmv_length == end) {
5533                         /* Came to hole at EOF. Trim it. */
5534                         if (fixlen <= 0)
5535                                 return 0;
5536                         out->bmv_length = fixlen;
5537                 }
5538         } else {
5539                 if (startblock == DELAYSTARTBLOCK)
5540                         out->bmv_block = -2;
5541                 else
5542                         out->bmv_block = xfs_fsb_to_db(ip, startblock);
5543                 fileblock = XFS_BB_TO_FSB(ip->i_mount, out->bmv_offset);
5544                 ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
5545                 if (xfs_iext_bno_to_ext(ifp, fileblock, &lastx) &&
5546                    (lastx == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t))-1))
5547                         out->bmv_oflags |= BMV_OF_LAST;
5548         }
5549
5550         return 1;
5551 }
5552
5553 /*
5554  * Get inode's extents as described in bmv, and format for output.
5555  * Calls formatter to fill the user's buffer until all extents
5556  * are mapped, until the passed-in bmv->bmv_count slots have
5557  * been filled, or until the formatter short-circuits the loop,
5558  * if it is tracking filled-in extents on its own.
5559  */
5560 int                                             /* error code */
5561 xfs_getbmap(
5562         xfs_inode_t             *ip,
5563         struct getbmapx         *bmv,           /* user bmap structure */
5564         xfs_bmap_format_t       formatter,      /* format to user */
5565         void                    *arg)           /* formatter arg */
5566 {
5567         __int64_t               bmvend;         /* last block requested */
5568         int                     error = 0;      /* return value */
5569         __int64_t               fixlen;         /* length for -1 case */
5570         int                     i;              /* extent number */
5571         int                     lock;           /* lock state */
5572         xfs_bmbt_irec_t         *map;           /* buffer for user's data */
5573         xfs_mount_t             *mp;            /* file system mount point */
5574         int                     nex;            /* # of user extents can do */
5575         int                     nexleft;        /* # of user extents left */
5576         int                     subnex;         /* # of bmapi's can do */
5577         int                     nmap;           /* number of map entries */
5578         struct getbmapx         *out;           /* output structure */
5579         int                     whichfork;      /* data or attr fork */
5580         int                     prealloced;     /* this is a file with
5581                                                  * preallocated data space */
5582         int                     iflags;         /* interface flags */
5583         int                     bmapi_flags;    /* flags for xfs_bmapi */
5584         int                     cur_ext = 0;
5585
5586         mp = ip->i_mount;
5587         iflags = bmv->bmv_iflags;
5588         whichfork = iflags & BMV_IF_ATTRFORK ? XFS_ATTR_FORK : XFS_DATA_FORK;
5589
5590         if (whichfork == XFS_ATTR_FORK) {
5591                 if (XFS_IFORK_Q(ip)) {
5592                         if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS &&
5593                             ip->i_d.di_aformat != XFS_DINODE_FMT_BTREE &&
5594                             ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)
5595                                 return XFS_ERROR(EINVAL);
5596                 } else if (unlikely(
5597                            ip->i_d.di_aformat != 0 &&
5598                            ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS)) {
5599                         XFS_ERROR_REPORT("xfs_getbmap", XFS_ERRLEVEL_LOW,
5600                                          ip->i_mount);
5601                         return XFS_ERROR(EFSCORRUPTED);
5602                 }
5603
5604                 prealloced = 0;
5605                 fixlen = 1LL << 32;
5606         } else {
5607                 if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS &&
5608                     ip->i_d.di_format != XFS_DINODE_FMT_BTREE &&
5609                     ip->i_d.di_format != XFS_DINODE_FMT_LOCAL)
5610                         return XFS_ERROR(EINVAL);
5611
5612                 if (xfs_get_extsz_hint(ip) ||
5613                     ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC|XFS_DIFLAG_APPEND)){
5614                         prealloced = 1;
5615                         fixlen = XFS_MAXIOFFSET(mp);
5616                 } else {
5617                         prealloced = 0;
5618                         fixlen = ip->i_size;
5619                 }
5620         }
5621
5622         if (bmv->bmv_length == -1) {
5623                 fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, fixlen));
5624                 bmv->bmv_length =
5625                         max_t(__int64_t, fixlen - bmv->bmv_offset, 0);
5626         } else if (bmv->bmv_length == 0) {
5627                 bmv->bmv_entries = 0;
5628                 return 0;
5629         } else if (bmv->bmv_length < 0) {
5630                 return XFS_ERROR(EINVAL);
5631         }
5632
5633         nex = bmv->bmv_count - 1;
5634         if (nex <= 0)
5635                 return XFS_ERROR(EINVAL);
5636         bmvend = bmv->bmv_offset + bmv->bmv_length;
5637
5638
5639         if (bmv->bmv_count > ULONG_MAX / sizeof(struct getbmapx))
5640                 return XFS_ERROR(ENOMEM);
5641         out = kmem_zalloc(bmv->bmv_count * sizeof(struct getbmapx), KM_MAYFAIL);
5642         if (!out)
5643                 return XFS_ERROR(ENOMEM);
5644
5645         xfs_ilock(ip, XFS_IOLOCK_SHARED);
5646         if (whichfork == XFS_DATA_FORK && !(iflags & BMV_IF_DELALLOC)) {
5647                 if (ip->i_delayed_blks || ip->i_size > ip->i_d.di_size) {
5648                         error = xfs_flush_pages(ip, 0, -1, 0, FI_REMAPF);
5649                         if (error)
5650                                 goto out_unlock_iolock;
5651                 }
5652
5653                 ASSERT(ip->i_delayed_blks == 0);
5654         }
5655
5656         lock = xfs_ilock_map_shared(ip);
5657
5658         /*
5659          * Don't let nex be bigger than the number of extents
5660          * we can have assuming alternating holes and real extents.
5661          */
5662         if (nex > XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1)
5663                 nex = XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1;
5664
5665         bmapi_flags = xfs_bmapi_aflag(whichfork);
5666         if (!(iflags & BMV_IF_PREALLOC))
5667                 bmapi_flags |= XFS_BMAPI_IGSTATE;
5668
5669         /*
5670          * Allocate enough space to handle "subnex" maps at a time.
5671          */
5672         error = ENOMEM;
5673         subnex = 16;
5674         map = kmem_alloc(subnex * sizeof(*map), KM_MAYFAIL | KM_NOFS);
5675         if (!map)
5676                 goto out_unlock_ilock;
5677
5678         bmv->bmv_entries = 0;
5679
5680         if (XFS_IFORK_NEXTENTS(ip, whichfork) == 0 &&
5681             (whichfork == XFS_ATTR_FORK || !(iflags & BMV_IF_DELALLOC))) {
5682                 error = 0;
5683                 goto out_free_map;
5684         }
5685
5686         nexleft = nex;
5687
5688         do {
5689                 nmap = (nexleft > subnex) ? subnex : nexleft;
5690                 error = xfs_bmapi(NULL, ip, XFS_BB_TO_FSBT(mp, bmv->bmv_offset),
5691                                   XFS_BB_TO_FSB(mp, bmv->bmv_length),
5692                                   bmapi_flags, NULL, 0, map, &nmap,
5693                                   NULL, NULL);
5694                 if (error)
5695                         goto out_free_map;
5696                 ASSERT(nmap <= subnex);
5697
5698                 for (i = 0; i < nmap && nexleft && bmv->bmv_length; i++) {
5699                         out[cur_ext].bmv_oflags = 0;
5700                         if (map[i].br_state == XFS_EXT_UNWRITTEN)
5701                                 out[cur_ext].bmv_oflags |= BMV_OF_PREALLOC;
5702                         else if (map[i].br_startblock == DELAYSTARTBLOCK)
5703                                 out[cur_ext].bmv_oflags |= BMV_OF_DELALLOC;
5704                         out[cur_ext].bmv_offset =
5705                                 XFS_FSB_TO_BB(mp, map[i].br_startoff);
5706                         out[cur_ext].bmv_length =
5707                                 XFS_FSB_TO_BB(mp, map[i].br_blockcount);
5708                         out[cur_ext].bmv_unused1 = 0;
5709                         out[cur_ext].bmv_unused2 = 0;
5710                         ASSERT(((iflags & BMV_IF_DELALLOC) != 0) ||
5711                               (map[i].br_startblock != DELAYSTARTBLOCK));
5712                         if (map[i].br_startblock == HOLESTARTBLOCK &&
5713                             whichfork == XFS_ATTR_FORK) {
5714                                 /* came to the end of attribute fork */
5715                                 out[cur_ext].bmv_oflags |= BMV_OF_LAST;
5716                                 goto out_free_map;
5717                         }
5718
5719                         if (!xfs_getbmapx_fix_eof_hole(ip, &out[cur_ext],
5720                                         prealloced, bmvend,
5721                                         map[i].br_startblock))
5722                                 goto out_free_map;
5723
5724                         nexleft--;
5725                         bmv->bmv_offset =
5726                                 out[cur_ext].bmv_offset +
5727                                 out[cur_ext].bmv_length;
5728                         bmv->bmv_length =
5729                                 max_t(__int64_t, 0, bmvend - bmv->bmv_offset);
5730                         bmv->bmv_entries++;
5731                         cur_ext++;
5732                 }
5733         } while (nmap && nexleft && bmv->bmv_length);
5734
5735  out_free_map:
5736         kmem_free(map);
5737  out_unlock_ilock:
5738         xfs_iunlock_map_shared(ip, lock);
5739  out_unlock_iolock:
5740         xfs_iunlock(ip, XFS_IOLOCK_SHARED);
5741
5742         for (i = 0; i < cur_ext; i++) {
5743                 int full = 0;   /* user array is full */
5744
5745                 /* format results & advance arg */
5746                 error = formatter(&arg, &out[i], &full);
5747                 if (error || full)
5748                         break;
5749         }
5750
5751         kmem_free(out);
5752         return error;
5753 }
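
/*
 * Illustrative sketch, not part of this file: one shape a formatter passed
 * to xfs_getbmap() could take.  xfs_getbmap() hands it a pointer to the
 * caller's opaque arg, one filled-in getbmapx record, and a "full" flag to
 * set when no more records can be accepted.  The destination structure and
 * helper below are hypothetical and only illustrate that contract.
 */
#if 0	/* example only, not built */
struct example_bmap_dest {
	struct getbmapx	*recs;		/* caller-supplied record array */
	int		nrecs;		/* capacity of recs[] */
	int		filled;		/* records copied so far */
};

static int				/* error */
example_bmap_format(
	void		**ap,		/* in/out: opaque formatter argument */
	struct getbmapx	*bmv,		/* one mapped extent from xfs_getbmap */
	int		*full)		/* out: set when destination is full */
{
	struct example_bmap_dest *dest = *ap;

	dest->recs[dest->filled++] = *bmv;	/* keep this record */
	if (dest->filled >= dest->nrecs)
		*full = 1;			/* stops the copy-out loop */
	return 0;				/* no error */
}
#endif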
5754
5755 /*
5756  * Check the last inode extent to determine whether this allocation will result
5757  * in blocks being allocated at the end of the file. When we allocate new data
5758  * blocks at the end of the file which do not start at the previous data block,
5759  * we will try to align the new blocks at stripe unit boundaries.
5760  */
5761 STATIC int                              /* error */
5762 xfs_bmap_isaeof(
5763         xfs_inode_t     *ip,            /* incore inode pointer */
5764         xfs_fileoff_t   off,            /* file offset in fsblocks */
5765         int             whichfork,      /* data or attribute fork */
5766         char            *aeof)          /* return value */
5767 {
5768         int             error;          /* error return value */
5769         xfs_ifork_t     *ifp;           /* inode fork pointer */
5770         xfs_bmbt_rec_host_t *lastrec;   /* extent record pointer */
5771         xfs_extnum_t    nextents;       /* number of file extents */
5772         xfs_bmbt_irec_t s;              /* expanded extent record */
5773
5774         ASSERT(whichfork == XFS_DATA_FORK);
5775         ifp = XFS_IFORK_PTR(ip, whichfork);
5776         if (!(ifp->if_flags & XFS_IFEXTENTS) &&
5777             (error = xfs_iread_extents(NULL, ip, whichfork)))
5778                 return error;
5779         nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
5780         if (nextents == 0) {
5781                 *aeof = 1;
5782                 return 0;
5783         }
5784         /*
5785          * Go to the last extent
5786          */
5787         lastrec = xfs_iext_get_ext(ifp, nextents - 1);
5788         xfs_bmbt_get_all(lastrec, &s);
5789         /*
5790          * Check we are allocating in the last extent (for delayed allocations)
5791          * or past the last extent for non-delayed allocations.
5792          */
5793         *aeof = (off >= s.br_startoff &&
5794                  off < s.br_startoff + s.br_blockcount &&
5795                  isnullstartblock(s.br_startblock)) ||
5796                 off >= s.br_startoff + s.br_blockcount;
5797         return 0;
5798 }
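
/*
 * Illustrative sketch, not part of this file: the test above restated as a
 * pure predicate on the file's last extent.  Names are hypothetical;
 * "delayed" stands in for isnullstartblock() on the extent's start block.
 */
#if 0	/* example only, not built */
static int
example_alloc_is_at_eof(
	xfs_fileoff_t	off,		/* offset being allocated */
	xfs_fileoff_t	last_off,	/* start of the last extent */
	xfs_filblks_t	last_len,	/* length of the last extent */
	int		delayed)	/* last extent is a delayed alloc? */
{
	/* inside the last extent counts only while it is still delalloc */
	if (delayed && off >= last_off && off < last_off + last_len)
		return 1;
	/* otherwise it must start at or beyond the end of the last extent */
	return off >= last_off + last_len;
}
#endif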
5799
5800 /*
5801  * Check if the endoff is outside the last extent. If so the caller will grow
5802  * the allocation to a stripe unit boundary.
5803  */
5804 int                                     /* error */
5805 xfs_bmap_eof(
5806         xfs_inode_t     *ip,            /* incore inode pointer */
5807         xfs_fileoff_t   endoff,         /* file offset in fsblocks */
5808         int             whichfork,      /* data or attribute fork */
5809         int             *eof)           /* result value */
5810 {
5811         xfs_fsblock_t   blockcount;     /* extent block count */
5812         int             error;          /* error return value */
5813         xfs_ifork_t     *ifp;           /* inode fork pointer */
5814         xfs_bmbt_rec_host_t *lastrec;   /* extent record pointer */
5815         xfs_extnum_t    nextents;       /* number of file extents */
5816         xfs_fileoff_t   startoff;       /* extent starting file offset */
5817
5818         ASSERT(whichfork == XFS_DATA_FORK);
5819         ifp = XFS_IFORK_PTR(ip, whichfork);
5820         if (!(ifp->if_flags & XFS_IFEXTENTS) &&
5821             (error = xfs_iread_extents(NULL, ip, whichfork)))
5822                 return error;
5823         nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
5824         if (nextents == 0) {
5825                 *eof = 1;
5826                 return 0;
5827         }
5828         /*
5829          * Go to the last extent
5830          */
5831         lastrec = xfs_iext_get_ext(ifp, nextents - 1);
5832         startoff = xfs_bmbt_get_startoff(lastrec);
5833         blockcount = xfs_bmbt_get_blockcount(lastrec);
5834         *eof = endoff >= startoff + blockcount;
5835         return 0;
5836 }
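
/*
 * Illustrative sketch, not part of this file: how a caller might act on the
 * result, per the comment above, by growing an end-of-file allocation to a
 * stripe unit boundary.  The helper and its arguments are hypothetical.
 */
#if 0	/* example only, not built */
static xfs_extlen_t
example_grow_to_stripe_unit(
	xfs_extlen_t	len,		/* requested length, in fsblocks */
	xfs_extlen_t	sunit,		/* stripe unit, in fsblocks (0 = none) */
	int		eof)		/* allocation ends past the last extent? */
{
	if (eof && sunit)
		len = ((len + sunit - 1) / sunit) * sunit;	/* round up */
	return len;
}
#endif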
5837
5838 #ifdef DEBUG
5839 STATIC
5840 xfs_buf_t *
5841 xfs_bmap_get_bp(
5842         xfs_btree_cur_t         *cur,
5843         xfs_fsblock_t           bno)
5844 {
5845         int i;
5846         xfs_buf_t *bp;
5847
5848         if (!cur)
5849                 return NULL;
5850
5851         bp = NULL;
5852         for (i = 0; i < XFS_BTREE_MAXLEVELS; i++) {
5853                 bp = cur->bc_bufs[i];
5854                 if (!bp) break;
5855                 if (XFS_BUF_ADDR(bp) == bno)
5856                         break;  /* Found it */
5857         }
5858         if (i == XFS_BTREE_MAXLEVELS)
5859                 bp = NULL;
5860
5861         if (!bp) { /* Chase down all the log items to see if the bp is there */
5862                 xfs_log_item_chunk_t    *licp;
5863                 xfs_trans_t             *tp;
5864
5865                 tp = cur->bc_tp;
5866                 licp = &tp->t_items;
5867                 while (!bp && licp != NULL) {
5868                         if (xfs_lic_are_all_free(licp)) {
5869                                 licp = licp->lic_next;
5870                                 continue;
5871                         }
5872                         for (i = 0; i < licp->lic_unused; i++) {
5873                                 xfs_log_item_desc_t     *lidp;
5874                                 xfs_log_item_t          *lip;
5875                                 xfs_buf_log_item_t      *bip;
5876                                 xfs_buf_t               *lbp;
5877
5878                                 if (xfs_lic_isfree(licp, i)) {
5879                                         continue;
5880                                 }
5881
5882                                 lidp = xfs_lic_slot(licp, i);
5883                                 lip = lidp->lid_item;
5884                                 if (lip->li_type != XFS_LI_BUF)
5885                                         continue;
5886
5887                                 bip = (xfs_buf_log_item_t *)lip;
5888                                 lbp = bip->bli_buf;
5889
5890                                 if (XFS_BUF_ADDR(lbp) == bno) {
5891                                         bp = lbp;
5892                                         break; /* Found it */
5893                                 }
5894                         }
5895                         licp = licp->lic_next;
5896                 }
5897         }
5898         return bp;
5899 }
5900
5901 STATIC void
5902 xfs_check_block(
5903         struct xfs_btree_block  *block,
5904         xfs_mount_t             *mp,
5905         int                     root,
5906         short                   sz)
5907 {
5908         int                     i, j, dmxr;
5909         __be64                  *pp, *thispa;   /* pointer to block address */
5910         xfs_bmbt_key_t          *prevp, *keyp;
5911
5912         ASSERT(be16_to_cpu(block->bb_level) > 0);
5913
5914         prevp = NULL;
5915         for (i = 1; i <= xfs_btree_get_numrecs(block); i++) {
5916                 dmxr = mp->m_bmap_dmxr[0];
5917                 keyp = XFS_BMBT_KEY_ADDR(mp, block, i);
5918
5919                 if (prevp) {
5920                         ASSERT(be64_to_cpu(prevp->br_startoff) <
5921                                be64_to_cpu(keyp->br_startoff));
5922                 }
5923                 prevp = keyp;
5924
5925                 /*
5926                  * Compare the block numbers to see if there are dups.
5927                  */
5928                 if (root)
5929                         pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, i, sz);
5930                 else
5931                         pp = XFS_BMBT_PTR_ADDR(mp, block, i, dmxr);
5932
5933                 for (j = i+1; j <= be16_to_cpu(block->bb_numrecs); j++) {
5934                         if (root)
5935                                 thispa = XFS_BMAP_BROOT_PTR_ADDR(mp, block, j, sz);
5936                         else
5937                                 thispa = XFS_BMBT_PTR_ADDR(mp, block, j, dmxr);
5938                         if (*thispa == *pp) {
5939                                 cmn_err(CE_WARN, "%s: thispa(%d) == pp(%d) %Ld",
5940                                         __func__, j, i,
5941                                         (unsigned long long)be64_to_cpu(*thispa));
5942                                 panic("%s: ptrs are equal in node\n",
5943                                         __func__);
5944                         }
5945                 }
5946         }
5947 }
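
/*
 * Illustrative sketch, not part of this file: the two invariants
 * xfs_check_block() enforces, shown on plain arrays.  Keys must strictly
 * increase, and no two entries may point at the same child block.
 */
#if 0	/* example only, not built */
static void
example_check_node(
	const unsigned long long *keys,	/* record keys, expected ascending */
	const unsigned long long *ptrs,	/* child block pointers */
	int			 nrecs)
{
	int	i, j;

	for (i = 1; i < nrecs; i++)
		ASSERT(keys[i - 1] < keys[i]);		/* keys strictly increase */
	for (i = 0; i < nrecs; i++)
		for (j = i + 1; j < nrecs; j++)
			ASSERT(ptrs[i] != ptrs[j]);	/* no duplicate children */
}
#endif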
5948
5949 /*
5950  * Check that the extents for the inode ip are in the right order in all
5951  * btree leaves.
5952  */
5953
5954 STATIC void
5955 xfs_bmap_check_leaf_extents(
5956         xfs_btree_cur_t         *cur,   /* btree cursor or null */
5957         xfs_inode_t             *ip,            /* incore inode pointer */
5958         int                     whichfork)      /* data or attr fork */
5959 {
5960         struct xfs_btree_block  *block; /* current btree block */
5961         xfs_fsblock_t           bno;    /* block # of "block" */
5962         xfs_buf_t               *bp;    /* buffer for "block" */
5963         int                     error;  /* error return value */
5964         xfs_extnum_t            i = 0, j; /* index into the extents list */
5965         xfs_ifork_t             *ifp;   /* fork structure */
5966         int                     level;  /* btree level, for checking */
5967         xfs_mount_t             *mp;    /* file system mount structure */
5968         __be64                  *pp;    /* pointer to block address */
5969         xfs_bmbt_rec_t          *ep;    /* pointer to current extent */
5970         xfs_bmbt_rec_t          last = {0, 0}; /* last extent in prev block */
5971         xfs_bmbt_rec_t          *nextp; /* pointer to next extent */
5972         int                     bp_release = 0;
5973
5974         if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE) {
5975                 return;
5976         }
5977
5978         bno = NULLFSBLOCK;
5979         mp = ip->i_mount;
5980         ifp = XFS_IFORK_PTR(ip, whichfork);
5981         block = ifp->if_broot;
5982         /*
5983          * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
5984          */
5985         level = be16_to_cpu(block->bb_level);
5986         ASSERT(level > 0);
5987         xfs_check_block(block, mp, 1, ifp->if_broot_bytes);
5988         pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
5989         bno = be64_to_cpu(*pp);
5990
5991         ASSERT(bno != NULLDFSBNO);
5992         ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
5993         ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);
5994
5995         /*
5996          * Go down the tree until leaf level is reached, following the first
5997          * pointer (leftmost) at each level.
5998          */
5999         while (level-- > 0) {
6000                 /* See if buf is in cur first */
6001                 bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
6002                 if (bp) {
6003                         bp_release = 0;
6004                 } else {
6005                         bp_release = 1;
6006                 }
6007                 if (!bp && (error = xfs_btree_read_bufl(mp, NULL, bno, 0, &bp,
6008                                 XFS_BMAP_BTREE_REF)))
6009                         goto error_norelse;
6010                 block = XFS_BUF_TO_BLOCK(bp);
6011                 XFS_WANT_CORRUPTED_GOTO(
6012                         xfs_bmap_sanity_check(mp, bp, level),
6013                         error0);
6014                 if (level == 0)
6015                         break;
6016
6017                 /*
6018                  * Check this block for basic sanity (increasing keys and
6019                  * no duplicate blocks).
6020                  */
6021
6022                 xfs_check_block(block, mp, 0, 0);
6023                 pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
6024                 bno = be64_to_cpu(*pp);
6025                 XFS_WANT_CORRUPTED_GOTO(XFS_FSB_SANITY_CHECK(mp, bno), error0);
6026                 if (bp_release) {
6027                         bp_release = 0;
6028                         xfs_trans_brelse(NULL, bp);
6029                 }
6030         }
6031
6032         /*
6033          * Here with bp and block set to the leftmost leaf node in the tree.
6034          */
6035         i = 0;
6036
6037         /*
6038          * Loop over all leaf nodes checking that all extents are in the right order.
6039          */
6040         for (;;) {
6041                 xfs_fsblock_t   nextbno;
6042                 xfs_extnum_t    num_recs;
6043
6044
6045                 num_recs = xfs_btree_get_numrecs(block);
6046
6047                 /*
6048                  * Read-ahead the next leaf block, if any.
6049                  */
6050
6051                 nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
6052
6053                 /*
6054                  * Check all the extents to make sure they are OK.
6055                  * If we had a previous block, the last entry should
6056                  * conform with the first entry in this one.
6057                  */
6058
6059                 ep = XFS_BMBT_REC_ADDR(mp, block, 1);
6060                 if (i) {
6061                         ASSERT(xfs_bmbt_disk_get_startoff(&last) +
6062                                xfs_bmbt_disk_get_blockcount(&last) <=
6063                                xfs_bmbt_disk_get_startoff(ep));
6064                 }
6065                 for (j = 1; j < num_recs; j++) {
6066                         nextp = XFS_BMBT_REC_ADDR(mp, block, j + 1);
6067                         ASSERT(xfs_bmbt_disk_get_startoff(ep) +
6068                                xfs_bmbt_disk_get_blockcount(ep) <=
6069                                xfs_bmbt_disk_get_startoff(nextp));
6070                         ep = nextp;
6071                 }
6072
6073                 last = *ep;
6074                 i += num_recs;
6075                 if (bp_release) {
6076                         bp_release = 0;
6077                         xfs_trans_brelse(NULL, bp);
6078                 }
6079                 bno = nextbno;
6080                 /*
6081                  * If we've reached the end, stop.
6082                  */
6083                 if (bno == NULLFSBLOCK)
6084                         break;
6085
6086                 bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
6087                 if (bp) {
6088                         bp_release = 0;
6089                 } else {
6090                         bp_release = 1;
6091                 }
6092                 if (!bp && (error = xfs_btree_read_bufl(mp, NULL, bno, 0, &bp,
6093                                 XFS_BMAP_BTREE_REF)))
6094                         goto error_norelse;
6095                 block = XFS_BUF_TO_BLOCK(bp);
6096         }
6097         if (bp_release) {
6098                 bp_release = 0;
6099                 xfs_trans_brelse(NULL, bp);
6100         }
6101         return;
6102
6103 error0:
6104         cmn_err(CE_WARN, "%s: at error0", __func__);
6105         if (bp_release)
6106                 xfs_trans_brelse(NULL, bp);
6107 error_norelse:
6108         cmn_err(CE_WARN, "%s: BAD after btree leaves for %d extents",
6109                 __func__, i);
6110         panic("%s: CORRUPTED BTREE OR SOMETHING", __func__);
6111         return;
6112 }
6113 #endif
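
/*
 * Illustrative sketch, not part of this file: the ordering rule the leaf
 * walk above enforces.  Each extent must start at or after the end of the
 * previous one, and the last extent of one leaf is carried over so the rule
 * also holds across leaf-block boundaries.  The arrays here are hypothetical.
 */
#if 0	/* example only, not built */
static void
example_check_extents_sorted(
	const xfs_fileoff_t	*startoff,	/* extent start offsets */
	const xfs_filblks_t	*blockcount,	/* extent lengths */
	int			nextents)
{
	int	i;

	for (i = 1; i < nextents; i++)
		ASSERT(startoff[i - 1] + blockcount[i - 1] <= startoff[i]);
}
#endif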
6114
6115 /*
6116  * Count fsblocks of the given fork.
6117  */
6118 int                                             /* error */
6119 xfs_bmap_count_blocks(
6120         xfs_trans_t             *tp,            /* transaction pointer */
6121         xfs_inode_t             *ip,            /* incore inode */
6122         int                     whichfork,      /* data or attr fork */
6123         int                     *count)         /* out: count of blocks */
6124 {
6125         struct xfs_btree_block  *block; /* current btree block */
6126         xfs_fsblock_t           bno;    /* block # of "block" */
6127         xfs_ifork_t             *ifp;   /* fork structure */
6128         int                     level;  /* btree level, for checking */
6129         xfs_mount_t             *mp;    /* file system mount structure */
6130         __be64                  *pp;    /* pointer to block address */
6131
6132         bno = NULLFSBLOCK;
6133         mp = ip->i_mount;
6134         ifp = XFS_IFORK_PTR(ip, whichfork);
6135         if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS) {
6136                 xfs_bmap_count_leaves(ifp, 0,
6137                         ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t),
6138                         count);
6139                 return 0;
6140         }
6141
6142         /*
6143          * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
6144          */
6145         block = ifp->if_broot;
6146         level = be16_to_cpu(block->bb_level);
6147         ASSERT(level > 0);
6148         pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
6149         bno = be64_to_cpu(*pp);
6150         ASSERT(bno != NULLDFSBNO);
6151         ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
6152         ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);
6153
6154         if (unlikely(xfs_bmap_count_tree(mp, tp, ifp, bno, level, count) < 0)) {
6155                 XFS_ERROR_REPORT("xfs_bmap_count_blocks(2)", XFS_ERRLEVEL_LOW,
6156                                  mp);
6157                 return XFS_ERROR(EFSCORRUPTED);
6158         }
6159
6160         return 0;
6161 }
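
/*
 * Illustrative sketch, not part of this file: a hypothetical caller of
 * xfs_bmap_count_blocks().  The block count comes back through the out
 * parameter in units of filesystem blocks; tp and ip are assumed to be in
 * scope here.
 */
#if 0	/* example only, not built */
{
	int	nblocks = 0;
	int	error;

	error = xfs_bmap_count_blocks(tp, ip, XFS_DATA_FORK, &nblocks);
	if (!error)
		cmn_err(CE_NOTE, "data fork maps %d fsblocks", nblocks);
}
#endif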
6162
6163 /*
6164  * Recursively walks each level of a btree
6165  * to count total fsblocks in use.
6166  */
6167 STATIC int                                     /* error */
6168 xfs_bmap_count_tree(
6169         xfs_mount_t     *mp,            /* file system mount point */
6170         xfs_trans_t     *tp,            /* transaction pointer */
6171         xfs_ifork_t     *ifp,           /* inode fork pointer */
6172         xfs_fsblock_t   blockno,        /* file system block number */
6173         int             levelin,        /* level in btree */
6174         int             *count)         /* Count of blocks */
6175 {
6176         int                     error;
6177         xfs_buf_t               *bp, *nbp;
6178         int                     level = levelin;
6179         __be64                  *pp;
6180         xfs_fsblock_t           bno = blockno;
6181         xfs_fsblock_t           nextbno;
6182         struct xfs_btree_block  *block, *nextblock;
6183         int                     numrecs;
6184
6185         if ((error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp, XFS_BMAP_BTREE_REF)))
6186                 return error;
6187         *count += 1;
6188         block = XFS_BUF_TO_BLOCK(bp);
6189
6190         if (--level) {
6191                 /* Interior level: count the remaining nodes at this level */
6192                 nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
6193                 while (nextbno != NULLFSBLOCK) {
6194                         if ((error = xfs_btree_read_bufl(mp, tp, nextbno,
6195                                 0, &nbp, XFS_BMAP_BTREE_REF)))
6196                                 return error;
6197                         *count += 1;
6198                         nextblock = XFS_BUF_TO_BLOCK(nbp);
6199                         nextbno = be64_to_cpu(nextblock->bb_u.l.bb_rightsib);
6200                         xfs_trans_brelse(tp, nbp);
6201                 }
6202
6203                 /* Dive to the next level */
6204                 pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
6205                 bno = be64_to_cpu(*pp);
6206                 if (unlikely((error =
6207                      xfs_bmap_count_tree(mp, tp, ifp, bno, level, count)) < 0)) {
6208                         xfs_trans_brelse(tp, bp);
6209                         XFS_ERROR_REPORT("xfs_bmap_count_tree(1)",
6210                                          XFS_ERRLEVEL_LOW, mp);
6211                         return XFS_ERROR(EFSCORRUPTED);
6212                 }
6213                 xfs_trans_brelse(tp, bp);
6214         } else {
6215                 /* Leaf level: count each leaf block and the blocks its records map */
6216                 for (;;) {
6217                         nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
6218                         numrecs = be16_to_cpu(block->bb_numrecs);
6219                         xfs_bmap_disk_count_leaves(mp, block, numrecs, count);
6220                         xfs_trans_brelse(tp, bp);
6221                         if (nextbno == NULLFSBLOCK)
6222                                 break;
6223                         bno = nextbno;
6224                         if ((error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
6225                                 XFS_BMAP_BTREE_REF)))
6226                                 return error;
6227                         *count += 1;
6228                         block = XFS_BUF_TO_BLOCK(bp);
6229                 }
6230         }
6231         return 0;
6232 }
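
/*
 * Illustrative sketch, not part of this file: the shape of the walk above on
 * a toy long-format btree.  Every block at a level is counted by following
 * right-sibling links, only the leftmost child is descended, and at the leaf
 * level each record's mapped block count is added.  The toy structure below
 * is hypothetical.
 */
#if 0	/* example only, not built */
struct toy_block {
	int			level;		/* 0 == leaf */
	struct toy_block	*rightsib;	/* next block at this level */
	struct toy_block	*leftchild;	/* leftmost child, if interior */
	int			nrecs;		/* records in this block */
	int			rec_blocks[16];	/* blocks mapped per record (leaf) */
};

static void
toy_count_tree(struct toy_block *blk, int *count)
{
	struct toy_block	*b;
	int			i;

	/* count every block at this level by chasing right siblings */
	for (b = blk; b != NULL; b = b->rightsib) {
		(*count)++;
		if (b->level == 0)
			for (i = 0; i < b->nrecs; i++)
				*count += b->rec_blocks[i];
	}
	/* descend only the leftmost child; the sibling chain covers the rest */
	if (blk->level > 0)
		toy_count_tree(blk->leftchild, count);
}
#endif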
6233
6234 /*
6235  * Count leaf blocks given a range of extent records.
6236  */
6237 STATIC void
6238 xfs_bmap_count_leaves(
6239         xfs_ifork_t             *ifp,
6240         xfs_extnum_t            idx,
6241         int                     numrecs,
6242         int                     *count)
6243 {
6244         int             b;
6245
6246         for (b = 0; b < numrecs; b++) {
6247                 xfs_bmbt_rec_host_t *frp = xfs_iext_get_ext(ifp, idx + b);
6248                 *count += xfs_bmbt_get_blockcount(frp);
6249         }
6250 }
6251
6252 /*
6253  * Count leaf blocks given a range of extent records originally
6254  * in btree format.
6255  */
6256 STATIC void
6257 xfs_bmap_disk_count_leaves(
6258         struct xfs_mount        *mp,
6259         struct xfs_btree_block  *block,
6260         int                     numrecs,
6261         int                     *count)
6262 {
6263         int             b;
6264         xfs_bmbt_rec_t  *frp;
6265
6266         for (b = 1; b <= numrecs; b++) {
6267                 frp = XFS_BMBT_REC_ADDR(mp, block, b);
6268                 *count += xfs_bmbt_disk_get_blockcount(frp);
6269         }
6270 }