/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_da_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_mount.h"
#include "xfs_itable.h"
#include "xfs_inode_item.h"
#include "xfs_extfree_item.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_attr_leaf.h"
#include "xfs_quota.h"
#include "xfs_trans_space.h"
#include "xfs_buf_item.h"
#include "xfs_filestream.h"
#include "xfs_vnodeops.h"
#include "xfs_trace.h"

kmem_zone_t		*xfs_bmap_free_item_zone;

/*
 * Prototypes for internal bmap routines.
 */

#ifdef DEBUG
STATIC void
xfs_bmap_check_leaf_extents(
	struct xfs_btree_cur	*cur,
	struct xfs_inode	*ip,
	int			whichfork);
#else
#define xfs_bmap_check_leaf_extents(cur, ip, whichfork)	do { } while (0)
#endif

/*
 * Called from xfs_bmap_add_attrfork to handle extents format files.
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_extents(
	xfs_trans_t		*tp,		/* transaction pointer */
	xfs_inode_t		*ip,		/* incore inode pointer */
	xfs_fsblock_t		*firstblock,	/* first block allocated */
	xfs_bmap_free_t		*flist,		/* blocks to free at commit */
	int			*flags);	/* inode logging flags */

/*
 * Called from xfs_bmap_add_attrfork to handle local format files.
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_local(
	xfs_trans_t		*tp,		/* transaction pointer */
	xfs_inode_t		*ip,		/* incore inode pointer */
	xfs_fsblock_t		*firstblock,	/* first block allocated */
	xfs_bmap_free_t		*flist,		/* blocks to free at commit */
	int			*flags);	/* inode logging flags */

/*
 * xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file.
 * It figures out where to ask the underlying allocator to put the new extent.
 */
STATIC int				/* error */
xfs_bmap_alloc(
	xfs_bmalloca_t		*ap);	/* bmap alloc argument struct */

/*
 * Transform a btree format file with only one leaf node, where the
 * extents list will fit in the inode, into an extents format file.
 * Since the file extents are already in-core, all we have to do is
 * give up the space for the btree root and pitch the leaf block.
 */
STATIC int				/* error */
xfs_bmap_btree_to_extents(
	xfs_trans_t		*tp,	/* transaction pointer */
	xfs_inode_t		*ip,	/* incore inode pointer */
	xfs_btree_cur_t		*cur,	/* btree cursor */
	int			*logflagsp, /* inode logging flags */
	int			whichfork); /* data or attr fork */

/*
 * Remove the entry "free" from the free item list.  Prev points to the
 * previous entry, unless "free" is the head of the list.
 */
STATIC void
xfs_bmap_del_free(
	xfs_bmap_free_t		*flist,	/* free item list header */
	xfs_bmap_free_item_t	*prev,	/* previous item on list, if any */
	xfs_bmap_free_item_t	*free);	/* list item to be freed */

/*
 * Convert an extents-format file into a btree-format file.
 * The new file will have a root block (in the inode) and a single child block.
 */
STATIC int					/* error */
xfs_bmap_extents_to_btree(
	xfs_trans_t		*tp,		/* transaction pointer */
	xfs_inode_t		*ip,		/* incore inode pointer */
	xfs_fsblock_t		*firstblock,	/* first-block-allocated */
	xfs_bmap_free_t		*flist,		/* blocks freed in xaction */
	xfs_btree_cur_t		**curp,		/* cursor returned to caller */
	int			wasdel,		/* converting a delayed alloc */
	int			*logflagsp,	/* inode logging flags */
	int			whichfork);	/* data or attr fork */

/*
 * Convert a local file to an extents file.
 * This code is sort of bogus, since the file data needs to get
 * logged so it won't be lost.  The bmap-level manipulations are ok, though.
 */
STATIC int				/* error */
xfs_bmap_local_to_extents(
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_inode_t	*ip,		/* incore inode pointer */
	xfs_fsblock_t	*firstblock,	/* first block allocated in xaction */
	xfs_extlen_t	total,		/* total blocks needed by transaction */
	int		*logflagsp,	/* inode logging flags */
	int		whichfork);	/* data or attr fork */

/*
 * Search the extents list for the inode, for the extent containing bno.
 * If bno lies in a hole, point to the next entry.  If bno lies past eof,
 * *eofp will be set, and *prevp will contain the last entry (null if none).
 * Else, *lastxp will be set to the index of the found
 * entry; *gotp will contain the entry.
 */
STATIC xfs_bmbt_rec_host_t *		/* pointer to found extent entry */
xfs_bmap_search_extents(
	xfs_inode_t	*ip,		/* incore inode pointer */
	xfs_fileoff_t	bno,		/* block number searched for */
	int		whichfork,	/* data or attr fork */
	int		*eofp,		/* out: end of file found */
	xfs_extnum_t	*lastxp,	/* out: last extent index */
	xfs_bmbt_irec_t	*gotp,		/* out: extent entry found */
	xfs_bmbt_irec_t	*prevp);	/* out: previous extent entry found */

/*
 * Compute the worst-case number of indirect blocks that will be used
 * for ip's delayed extent of length "len".
 */
STATIC xfs_filblks_t
xfs_bmap_worst_indlen(
	xfs_inode_t	*ip,		/* incore inode pointer */
	xfs_filblks_t	len);		/* delayed extent length */
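
/*
 * Worked example (illustrative, with made-up fanout numbers): if a
 * bmap btree block held m = 250 records, a delayed extent of
 * len = 1000 blocks could need ceil(1000/250) = 4 leaf blocks plus
 * one interior block per additional level, so the worst case charged
 * here would be 5 indirect blocks.
 */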

#ifdef DEBUG
/*
 * Perform various validation checks on the values being returned
 * from xfs_bmapi().
 */
STATIC void
xfs_bmap_validate_ret(
	xfs_fileoff_t		bno,
	xfs_filblks_t		len,
	int			flags,
	xfs_bmbt_irec_t		*mval,
	int			onmap,
	int			nmap);
#else
#define	xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
#endif /* DEBUG */

STATIC int
xfs_bmap_count_tree(
	xfs_mount_t		*mp,
	xfs_trans_t		*tp,
	xfs_ifork_t		*ifp,
	xfs_fsblock_t		blockno,
	int			levelin,
	int			*count);

STATIC void
xfs_bmap_count_leaves(
	xfs_ifork_t		*ifp,
	xfs_extnum_t		idx,
	int			numrecs,
	int			*count);

STATIC void
xfs_bmap_disk_count_leaves(
	struct xfs_mount	*mp,
	struct xfs_btree_block	*block,
	int			numrecs,
	int			*count);

/*
 * Bmap internal routines.
 */

STATIC int				/* error */
xfs_bmbt_lookup_eq(
	struct xfs_btree_cur	*cur,
	xfs_fileoff_t		off,
	xfs_fsblock_t		bno,
	xfs_filblks_t		len,
	int			*stat)	/* success/failure */
{
	cur->bc_rec.b.br_startoff = off;
	cur->bc_rec.b.br_startblock = bno;
	cur->bc_rec.b.br_blockcount = len;
	return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
}

STATIC int				/* error */
xfs_bmbt_lookup_ge(
	struct xfs_btree_cur	*cur,
	xfs_fileoff_t		off,
	xfs_fsblock_t		bno,
	xfs_filblks_t		len,
	int			*stat)	/* success/failure */
{
	cur->bc_rec.b.br_startoff = off;
	cur->bc_rec.b.br_startblock = bno;
	cur->bc_rec.b.br_blockcount = len;
	return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
}
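
/*
 * Illustrative sketch (not from the original file; "got" and "stat"
 * are hypothetical locals): the EQ variant positions the cursor on an
 * exactly matching record before an update or delete, while the GE
 * variant finds the first record at or after the key (*stat == 0
 * means no such record exists).
 */
#if 0	/* example only */
	error = xfs_bmbt_lookup_eq(cur, got.br_startoff, got.br_startblock,
			got.br_blockcount, &stat);
	if (!error && stat == 1)
		error = xfs_btree_delete(cur, &stat);
#endif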

/*
 * Check if the inode needs to be converted to btree format.
 */
static inline bool xfs_bmap_needs_btree(struct xfs_inode *ip, int whichfork)
{
	return XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
		XFS_IFORK_NEXTENTS(ip, whichfork) >
			XFS_IFORK_MAXEXT(ip, whichfork);
}

/*
 * Check if the inode should be converted to extent format.
 */
static inline bool xfs_bmap_wants_extents(struct xfs_inode *ip, int whichfork)
{
	return XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE &&
		XFS_IFORK_NEXTENTS(ip, whichfork) <=
			XFS_IFORK_MAXEXT(ip, whichfork);
}
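
/*
 * Illustrative sketch of how these predicates are typically used (the
 * real call sites appear later in this file; tp, ip, firstblock,
 * flist, cur and logflags stand in for a caller's locals): after an
 * extent is added, the fork is converted to btree format once it
 * outgrows the inline extent list.
 */
#if 0	/* example only */
	if (xfs_bmap_needs_btree(ip, XFS_DATA_FORK))
		error = xfs_bmap_extents_to_btree(tp, ip, firstblock, flist,
				&cur, 0, &logflags, XFS_DATA_FORK);
#endif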

/*
 * Update the record referred to by cur to the value given
 * by [off, bno, len, state].
 * This either works (return 0) or gets an EFSCORRUPTED error.
 */
STATIC int
xfs_bmbt_update(
	struct xfs_btree_cur	*cur,
	xfs_fileoff_t		off,
	xfs_fsblock_t		bno,
	xfs_filblks_t		len,
	xfs_exntst_t		state)
{
	union xfs_btree_rec	rec;

	xfs_bmbt_disk_set_allf(&rec.bmbt, off, bno, len, state);
	return xfs_btree_update(cur, &rec);
}
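
/*
 * Illustrative sketch (values are hypothetical): after positioning the
 * cursor with xfs_bmbt_lookup_eq(), a caller rewrites the record in
 * place, e.g. to absorb a merged neighbor into LEFT:
 */
#if 0	/* example only */
	error = xfs_bmbt_update(cur, LEFT.br_startoff, LEFT.br_startblock,
			LEFT.br_blockcount + new->br_blockcount,
			LEFT.br_state);
#endif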

/*
 * Called from xfs_bmap_add_attrfork to handle btree format files.
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_btree(
	xfs_trans_t		*tp,		/* transaction pointer */
	xfs_inode_t		*ip,		/* incore inode pointer */
	xfs_fsblock_t		*firstblock,	/* first block allocated */
	xfs_bmap_free_t		*flist,		/* blocks to free at commit */
	int			*flags)		/* inode logging flags */
{
	xfs_btree_cur_t		*cur;		/* btree cursor */
	int			error;		/* error return value */
	xfs_mount_t		*mp;		/* file system mount struct */
	int			stat;		/* newroot status */

	mp = ip->i_mount;
	if (ip->i_df.if_broot_bytes <= XFS_IFORK_DSIZE(ip))
		*flags |= XFS_ILOG_DBROOT;
	else {
		cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK);
		cur->bc_private.b.flist = flist;
		cur->bc_private.b.firstblock = *firstblock;
		if ((error = xfs_bmbt_lookup_ge(cur, 0, 0, 0, &stat)))
			goto error0;
		/* must be at least one entry */
		XFS_WANT_CORRUPTED_GOTO(stat == 1, error0);
		if ((error = xfs_btree_new_iroot(cur, flags, &stat)))
			goto error0;
		if (stat == 0) {
			xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
			return XFS_ERROR(ENOSPC);
		}
		*firstblock = cur->bc_private.b.firstblock;
		cur->bc_private.b.allocated = 0;
		xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
	}
	return 0;
error0:
	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
	return error;
}

/*
 * Called from xfs_bmap_add_attrfork to handle extents format files.
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_extents(
	xfs_trans_t		*tp,		/* transaction pointer */
	xfs_inode_t		*ip,		/* incore inode pointer */
	xfs_fsblock_t		*firstblock,	/* first block allocated */
	xfs_bmap_free_t		*flist,		/* blocks to free at commit */
	int			*flags)		/* inode logging flags */
{
	xfs_btree_cur_t		*cur;		/* bmap btree cursor */
	int			error;		/* error return value */

	if (ip->i_d.di_nextents * sizeof(xfs_bmbt_rec_t) <= XFS_IFORK_DSIZE(ip))
		return 0;
	cur = NULL;
	error = xfs_bmap_extents_to_btree(tp, ip, firstblock, flist, &cur, 0,
		flags, XFS_DATA_FORK);
	if (cur) {
		cur->bc_private.b.allocated = 0;
		xfs_btree_del_cursor(cur,
			error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
	}
	return error;
}

/*
 * Called from xfs_bmap_add_attrfork to handle local format files.
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_local(
	xfs_trans_t		*tp,		/* transaction pointer */
	xfs_inode_t		*ip,		/* incore inode pointer */
	xfs_fsblock_t		*firstblock,	/* first block allocated */
	xfs_bmap_free_t		*flist,		/* blocks to free at commit */
	int			*flags)		/* inode logging flags */
{
	xfs_da_args_t		dargs;		/* args for dir/attr code */
	int			error;		/* error return value */
	xfs_mount_t		*mp;		/* mount structure pointer */

	if (ip->i_df.if_bytes <= XFS_IFORK_DSIZE(ip))
		return 0;

	if (S_ISDIR(ip->i_d.di_mode)) {
		mp = ip->i_mount;
		memset(&dargs, 0, sizeof(dargs));
		dargs.dp = ip;
		dargs.firstblock = firstblock;
		dargs.flist = flist;
		dargs.total = mp->m_dirblkfsbs;
		dargs.whichfork = XFS_DATA_FORK;
		dargs.trans = tp;
		error = xfs_dir2_sf_to_block(&dargs);
	} else
		error = xfs_bmap_local_to_extents(tp, ip, firstblock, 1, flags,
			XFS_DATA_FORK);
	return error;
}
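
/*
 * Illustrative sketch (assumed shape; the dispatcher itself is outside
 * this excerpt) of how xfs_bmap_add_attrfork selects one of the three
 * handlers above based on the data fork format:
 */
#if 0	/* example only */
	switch (ip->i_d.di_format) {
	case XFS_DINODE_FMT_LOCAL:
		error = xfs_bmap_add_attrfork_local(tp, ip, &firstblock,
				&flist, &logflags);
		break;
	case XFS_DINODE_FMT_EXTENTS:
		error = xfs_bmap_add_attrfork_extents(tp, ip, &firstblock,
				&flist, &logflags);
		break;
	case XFS_DINODE_FMT_BTREE:
		error = xfs_bmap_add_attrfork_btree(tp, ip, &firstblock,
				&flist, &logflags);
		break;
	}
#endif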

/*
 * Convert a delayed allocation to a real allocation.
 */
STATIC int				/* error */
xfs_bmap_add_extent_delay_real(
	struct xfs_bmalloca	*bma)
{
	struct xfs_bmbt_irec	*new = &bma->got;
	int			diff;	/* temp value */
	xfs_bmbt_rec_host_t	*ep;	/* extent entry for idx */
	int			error;	/* error return value */
	int			i;	/* temp state */
	xfs_ifork_t		*ifp;	/* inode fork pointer */
	xfs_fileoff_t		new_endoff;	/* end offset of new entry */
	xfs_bmbt_irec_t		r[3];	/* neighbor extent entries */
					/* left is 0, right is 1, prev is 2 */
	int			rval=0;	/* return value (logging flags) */
	int			state = 0;/* state bits, accessed thru macros */
	xfs_filblks_t		da_new; /* new count del alloc blocks used */
	xfs_filblks_t		da_old; /* old count del alloc blocks used */
	xfs_filblks_t		temp=0;	/* value for da_new calculations */
	xfs_filblks_t		temp2=0;/* value for da_new calculations */
	int			tmp_rval;	/* partial logging flags */

	ifp = XFS_IFORK_PTR(bma->ip, XFS_DATA_FORK);

	ASSERT(bma->idx >= 0);
	ASSERT(bma->idx <= ifp->if_bytes / sizeof(struct xfs_bmbt_rec));
	ASSERT(!isnullstartblock(new->br_startblock));
	ASSERT(!bma->cur ||
	       (bma->cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL));

	XFS_STATS_INC(xs_add_exlist);

#define	LEFT		r[0]
#define	RIGHT		r[1]
#define	PREV		r[2]

	/*
	 * Set up a bunch of variables to make the tests simpler.
	 */
	ep = xfs_iext_get_ext(ifp, bma->idx);
	xfs_bmbt_get_all(ep, &PREV);
	new_endoff = new->br_startoff + new->br_blockcount;
	ASSERT(PREV.br_startoff <= new->br_startoff);
	ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);

	da_old = startblockval(PREV.br_startblock);
	da_new = 0;

	/*
	 * Set flags determining what part of the previous delayed allocation
	 * extent is being replaced by a real allocation.
	 */
	if (PREV.br_startoff == new->br_startoff)
		state |= BMAP_LEFT_FILLING;
	if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
		state |= BMAP_RIGHT_FILLING;

	/*
	 * Check and set flags if this segment has a left neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 */
	if (bma->idx > 0) {
		state |= BMAP_LEFT_VALID;
		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx - 1), &LEFT);

		if (isnullstartblock(LEFT.br_startblock))
			state |= BMAP_LEFT_DELAY;
	}

	if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
	    LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
	    LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
	    LEFT.br_state == new->br_state &&
	    LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN)
		state |= BMAP_LEFT_CONTIG;

	/*
	 * Check and set flags if this segment has a right neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 * Also check for all-three-contiguous being too large.
	 */
	if (bma->idx < bma->ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1) {
		state |= BMAP_RIGHT_VALID;
		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx + 1), &RIGHT);

		if (isnullstartblock(RIGHT.br_startblock))
			state |= BMAP_RIGHT_DELAY;
	}

	if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
	    new_endoff == RIGHT.br_startoff &&
	    new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
	    new->br_state == RIGHT.br_state &&
	    new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
	    ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
		       BMAP_RIGHT_FILLING)) !=
		      (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
		       BMAP_RIGHT_FILLING) ||
	     LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
			<= MAXEXTLEN))
		state |= BMAP_RIGHT_CONTIG;
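
	/*
	 * Worked example (illustrative): if PREV is a delayed extent
	 * covering file blocks [100, 200) and the new real allocation
	 * covers [100, 150), then BMAP_LEFT_FILLING is set but
	 * BMAP_RIGHT_FILLING is not, and the switch below lands in one
	 * of the BMAP_LEFT_FILLING cases depending on whether the left
	 * neighbor is contiguous.
	 */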

	/*
	 * Switch out based on the FILLING and CONTIG state bits.
	 */
	switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
			 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
	     BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * The left and right neighbors are both contiguous with new.
		 */
		bma->idx--;
		trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx),
			LEFT.br_blockcount + PREV.br_blockcount +
			RIGHT.br_blockcount);
		trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);

		xfs_iext_remove(bma->ip, bma->idx + 1, 2, state);
		bma->ip->i_d.di_nextents--;
		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, RIGHT.br_startoff,
					RIGHT.br_startblock,
					RIGHT.br_blockcount, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
			error = xfs_btree_delete(bma->cur, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
			error = xfs_btree_decrement(bma->cur, 0, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
			error = xfs_bmbt_update(bma->cur, LEFT.br_startoff,
					LEFT.br_startblock,
					LEFT.br_blockcount +
					PREV.br_blockcount +
					RIGHT.br_blockcount, LEFT.br_state);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * The left neighbor is contiguous, the right is not.
		 */
		bma->idx--;

		trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx),
			LEFT.br_blockcount + PREV.br_blockcount);
		trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);

		xfs_iext_remove(bma->ip, bma->idx + 1, 1, state);
		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, LEFT.br_startoff,
					LEFT.br_startblock, LEFT.br_blockcount,
					&i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
			error = xfs_bmbt_update(bma->cur, LEFT.br_startoff,
					LEFT.br_startblock,
					LEFT.br_blockcount +
					PREV.br_blockcount, LEFT.br_state);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * The right neighbor is contiguous, the left is not.
		 */
		trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
		xfs_bmbt_set_startblock(ep, new->br_startblock);
		xfs_bmbt_set_blockcount(ep,
			PREV.br_blockcount + RIGHT.br_blockcount);
		trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);

		xfs_iext_remove(bma->ip, bma->idx + 1, 1, state);
		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, RIGHT.br_startoff,
					RIGHT.br_startblock,
					RIGHT.br_blockcount, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
			error = xfs_bmbt_update(bma->cur, PREV.br_startoff,
					new->br_startblock,
					PREV.br_blockcount +
					RIGHT.br_blockcount, PREV.br_state);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * Neither the left nor right neighbors are contiguous with
		 * the new one.
		 */
		trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
		xfs_bmbt_set_startblock(ep, new->br_startblock);
		trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);

		bma->ip->i_d.di_nextents++;
		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff,
					new->br_startblock, new->br_blockcount,
					&i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 0, done);
			bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
		/*
		 * Filling in the first part of a previous delayed allocation.
		 * The left neighbor is contiguous.
		 */
		trace_xfs_bmap_pre_update(bma->ip, bma->idx - 1, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx - 1),
			LEFT.br_blockcount + new->br_blockcount);
		xfs_bmbt_set_startoff(ep,
			PREV.br_startoff + new->br_blockcount);
		trace_xfs_bmap_post_update(bma->ip, bma->idx - 1, state, _THIS_IP_);

		temp = PREV.br_blockcount - new->br_blockcount;
		trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(ep, temp);
		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, LEFT.br_startoff,
					LEFT.br_startblock, LEFT.br_blockcount,
					&i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
			error = xfs_bmbt_update(bma->cur, LEFT.br_startoff,
					LEFT.br_startblock,
					LEFT.br_blockcount +
					new->br_blockcount,
					LEFT.br_state);
			if (error)
				goto done;
		}

		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
			startblockval(PREV.br_startblock));
		xfs_bmbt_set_startblock(ep, nullstartblock(da_new));
		trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);

		bma->idx--;
		break;

	case BMAP_LEFT_FILLING:
		/*
		 * Filling in the first part of a previous delayed allocation.
		 * The left neighbor is not contiguous.
		 */
		trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
		xfs_bmbt_set_startoff(ep, new_endoff);
		temp = PREV.br_blockcount - new->br_blockcount;
		xfs_bmbt_set_blockcount(ep, temp);
		xfs_iext_insert(bma->ip, bma->idx, 1, new, state);
		bma->ip->i_d.di_nextents++;
		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff,
					new->br_startblock, new->br_blockcount,
					&i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 0, done);
			bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
		}

		if (xfs_bmap_needs_btree(bma->ip, XFS_DATA_FORK)) {
			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
					bma->firstblock, bma->flist,
					&bma->cur, 1, &tmp_rval, XFS_DATA_FORK);
			rval |= tmp_rval;
			if (error)
				goto done;
		}

		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
			startblockval(PREV.br_startblock) -
			(bma->cur ? bma->cur->bc_private.b.allocated : 0));
		ep = xfs_iext_get_ext(ifp, bma->idx + 1);
		xfs_bmbt_set_startblock(ep, nullstartblock(da_new));
		trace_xfs_bmap_post_update(bma->ip, bma->idx + 1, state, _THIS_IP_);
		break;

	case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Filling in the last part of a previous delayed allocation.
		 * The right neighbor is contiguous with the new allocation.
		 */
		temp = PREV.br_blockcount - new->br_blockcount;
		trace_xfs_bmap_pre_update(bma->ip, bma->idx + 1, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(ep, temp);
		xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, bma->idx + 1),
			new->br_startoff, new->br_startblock,
			new->br_blockcount + RIGHT.br_blockcount,
			RIGHT.br_state);
		trace_xfs_bmap_post_update(bma->ip, bma->idx + 1, state, _THIS_IP_);
		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, RIGHT.br_startoff,
					RIGHT.br_startblock,
					RIGHT.br_blockcount, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
			error = xfs_bmbt_update(bma->cur, new->br_startoff,
					new->br_startblock,
					new->br_blockcount +
					RIGHT.br_blockcount,
					RIGHT.br_state);
			if (error)
				goto done;
		}

		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
			startblockval(PREV.br_startblock));
		trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
		xfs_bmbt_set_startblock(ep, nullstartblock(da_new));
		trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);

		bma->idx++;
		break;

	case BMAP_RIGHT_FILLING:
		/*
		 * Filling in the last part of a previous delayed allocation.
		 * The right neighbor is not contiguous.
		 */
		temp = PREV.br_blockcount - new->br_blockcount;
		trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(ep, temp);
		xfs_iext_insert(bma->ip, bma->idx + 1, 1, new, state);
		bma->ip->i_d.di_nextents++;
		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff,
					new->br_startblock, new->br_blockcount,
					&i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 0, done);
			bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
		}

		if (xfs_bmap_needs_btree(bma->ip, XFS_DATA_FORK)) {
			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
				bma->firstblock, bma->flist, &bma->cur, 1,
				&tmp_rval, XFS_DATA_FORK);
			rval |= tmp_rval;
			if (error)
				goto done;
		}

		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
			startblockval(PREV.br_startblock) -
			(bma->cur ? bma->cur->bc_private.b.allocated : 0));
		ep = xfs_iext_get_ext(ifp, bma->idx);
		xfs_bmbt_set_startblock(ep, nullstartblock(da_new));
		trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);

		bma->idx++;
		break;

	case 0:
		/*
		 * Filling in the middle part of a previous delayed allocation.
		 * Contiguity is impossible here.
		 * This case is avoided almost all the time.
		 *
		 * We start with a delayed allocation:
		 *
		 * +ddddddddddddddddddddddddddddddddddddddddddddddddddddddd+
		 *  PREV @ idx
		 *
		 * and we are allocating:
		 *                     +rrrrrrrrrrrrrrrrr+
		 *                            new
		 *
		 * and we set it up for insertion as:
		 * +ddddddddddddddddddd+rrrrrrrrrrrrrrrrr+ddddddddddddddddd+
		 *                            new
		 *  PREV @ idx          LEFT              RIGHT
		 *                      inserted at idx + 1
		 */
		temp = new->br_startoff - PREV.br_startoff;
		temp2 = PREV.br_startoff + PREV.br_blockcount - new_endoff;
		trace_xfs_bmap_pre_update(bma->ip, bma->idx, 0, _THIS_IP_);
		xfs_bmbt_set_blockcount(ep, temp);	/* truncate PREV */
		LEFT = *new;
		RIGHT.br_state = PREV.br_state;
		RIGHT.br_startblock = nullstartblock(
				(int)xfs_bmap_worst_indlen(bma->ip, temp2));
		RIGHT.br_startoff = new_endoff;
		RIGHT.br_blockcount = temp2;
		/* insert LEFT (r[0]) and RIGHT (r[1]) at the same time */
		xfs_iext_insert(bma->ip, bma->idx + 1, 2, &LEFT, state);
		bma->ip->i_d.di_nextents++;
		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff,
					new->br_startblock, new->br_blockcount,
					&i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 0, done);
			bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
		}

		if (xfs_bmap_needs_btree(bma->ip, XFS_DATA_FORK)) {
			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
					bma->firstblock, bma->flist, &bma->cur,
					1, &tmp_rval, XFS_DATA_FORK);
			rval |= tmp_rval;
			if (error)
				goto done;
		}

		temp = xfs_bmap_worst_indlen(bma->ip, temp);
		temp2 = xfs_bmap_worst_indlen(bma->ip, temp2);
		diff = (int)(temp + temp2 - startblockval(PREV.br_startblock) -
			(bma->cur ? bma->cur->bc_private.b.allocated : 0));
		if (diff > 0) {
			error = xfs_icsb_modify_counters(bma->ip->i_mount,
					XFS_SBS_FDBLOCKS,
					-((int64_t)diff), 0);
			ASSERT(!error);
			if (error)
				goto done;
		}

		ep = xfs_iext_get_ext(ifp, bma->idx);
		xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
		trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
		trace_xfs_bmap_pre_update(bma->ip, bma->idx + 2, state, _THIS_IP_);
		xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, bma->idx + 2),
			nullstartblock((int)temp2));
		trace_xfs_bmap_post_update(bma->ip, bma->idx + 2, state, _THIS_IP_);

		bma->idx++;
		da_new = temp + temp2;
		break;

	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
	case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
	case BMAP_LEFT_CONTIG:
	case BMAP_RIGHT_CONTIG:
		/*
		 * These cases are all impossible.
		 */
		ASSERT(0);
	}

	/* convert to a btree if necessary */
	if (xfs_bmap_needs_btree(bma->ip, XFS_DATA_FORK)) {
		int	tmp_logflags;	/* partial log flag return val */

		ASSERT(bma->cur == NULL);
		error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
				bma->firstblock, bma->flist, &bma->cur,
				da_old > 0, &tmp_logflags, XFS_DATA_FORK);
		bma->logflags |= tmp_logflags;
		if (error)
			goto done;
	}

	/* adjust for changes in reserved delayed indirect blocks */
	if (da_old || da_new) {
		temp = da_new;
		if (bma->cur)
			temp += bma->cur->bc_private.b.allocated;
		ASSERT(temp <= da_old);
		if (temp < da_old)
			xfs_icsb_modify_counters(bma->ip->i_mount,
					XFS_SBS_FDBLOCKS,
					(int64_t)(da_old - temp), 0);
	}

	/* clear out the allocated field, done with it now in any case. */
	if (bma->cur)
		bma->cur->bc_private.b.allocated = 0;

	xfs_bmap_check_leaf_extents(bma->cur, bma->ip, XFS_DATA_FORK);
done:
	bma->logflags |= rval;
	return error;
}
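
/*
 * Worked example (illustrative) of the indirect-block accounting above:
 * if the delayed extent had reserved da_old = 4 indirect blocks, the
 * remaining delayed pieces only need da_new = 1, and the btree cursor
 * consumed no blocks, then da_old - da_new = 3 blocks are returned to
 * the free-block counter via xfs_icsb_modify_counters().
 */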

/*
 * Convert an unwritten allocation to a real allocation or vice versa.
 */
STATIC int				/* error */
xfs_bmap_add_extent_unwritten_real(
	struct xfs_trans	*tp,
	xfs_inode_t		*ip,	/* incore inode pointer */
	xfs_extnum_t		*idx,	/* extent number to update/insert */
	xfs_btree_cur_t		**curp,	/* if *curp is null, not a btree */
	xfs_bmbt_irec_t		*new,	/* new data to add to file extents */
	xfs_fsblock_t		*first,	/* pointer to firstblock variable */
	xfs_bmap_free_t		*flist,	/* list of extents to be freed */
	int			*logflagsp) /* inode logging flags */
{
	xfs_btree_cur_t		*cur;	/* btree cursor */
	xfs_bmbt_rec_host_t	*ep;	/* extent entry for idx */
	int			error;	/* error return value */
	int			i;	/* temp state */
	xfs_ifork_t		*ifp;	/* inode fork pointer */
	xfs_fileoff_t		new_endoff;	/* end offset of new entry */
	xfs_exntst_t		newext;	/* new extent state */
	xfs_exntst_t		oldext;	/* old extent state */
	xfs_bmbt_irec_t		r[3];	/* neighbor extent entries */
					/* left is 0, right is 1, prev is 2 */
	int			rval=0;	/* return value (logging flags) */
	int			state = 0;/* state bits, accessed thru macros */

	*logflagsp = 0;

	cur = *curp;
	ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);

	ASSERT(*idx >= 0);
	ASSERT(*idx <= ifp->if_bytes / sizeof(struct xfs_bmbt_rec));
	ASSERT(!isnullstartblock(new->br_startblock));

	XFS_STATS_INC(xs_add_exlist);

	/*
	 * Set up a bunch of variables to make the tests simpler.
	 */
	error = 0;
	ep = xfs_iext_get_ext(ifp, *idx);
	xfs_bmbt_get_all(ep, &PREV);
	newext = new->br_state;
	oldext = (newext == XFS_EXT_UNWRITTEN) ?
		XFS_EXT_NORM : XFS_EXT_UNWRITTEN;
	ASSERT(PREV.br_state == oldext);
	new_endoff = new->br_startoff + new->br_blockcount;
	ASSERT(PREV.br_startoff <= new->br_startoff);
	ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);

	/*
	 * Set flags determining what part of the previous oldext allocation
	 * extent is being replaced by a newext allocation.
	 */
	if (PREV.br_startoff == new->br_startoff)
		state |= BMAP_LEFT_FILLING;
	if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
		state |= BMAP_RIGHT_FILLING;

	/*
	 * Check and set flags if this segment has a left neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 */
	if (*idx > 0) {
		state |= BMAP_LEFT_VALID;
		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx - 1), &LEFT);

		if (isnullstartblock(LEFT.br_startblock))
			state |= BMAP_LEFT_DELAY;
	}

	if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
	    LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
	    LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
	    LEFT.br_state == newext &&
	    LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN)
		state |= BMAP_LEFT_CONTIG;

	/*
	 * Check and set flags if this segment has a right neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 * Also check for all-three-contiguous being too large.
	 */
	if (*idx < ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1) {
		state |= BMAP_RIGHT_VALID;
		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx + 1), &RIGHT);
		if (isnullstartblock(RIGHT.br_startblock))
			state |= BMAP_RIGHT_DELAY;
	}

	if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
	    new_endoff == RIGHT.br_startoff &&
	    new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
	    newext == RIGHT.br_state &&
	    new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
	    ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
		       BMAP_RIGHT_FILLING)) !=
		      (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
		       BMAP_RIGHT_FILLING) ||
	     LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
			<= MAXEXTLEN))
		state |= BMAP_RIGHT_CONTIG;

	/*
	 * Switch out based on the FILLING and CONTIG state bits.
	 */
	switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
			 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
	     BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Setting all of a previous oldext extent to newext.
		 * The left and right neighbors are both contiguous with new.
		 */
		--*idx;

		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx),
			LEFT.br_blockcount + PREV.br_blockcount +
			RIGHT.br_blockcount);
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);

		xfs_iext_remove(ip, *idx + 1, 2, state);
		ip->i_d.di_nextents -= 2;
		if (cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff,
					RIGHT.br_startblock,
					RIGHT.br_blockcount, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
			if ((error = xfs_btree_delete(cur, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
			if ((error = xfs_btree_decrement(cur, 0, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
			if ((error = xfs_btree_delete(cur, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
			if ((error = xfs_btree_decrement(cur, 0, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
			if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
				LEFT.br_startblock,
				LEFT.br_blockcount + PREV.br_blockcount +
				RIGHT.br_blockcount, LEFT.br_state)))
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
		/*
		 * Setting all of a previous oldext extent to newext.
		 * The left neighbor is contiguous, the right is not.
		 */
		--*idx;

		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx),
			LEFT.br_blockcount + PREV.br_blockcount);
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);

		xfs_iext_remove(ip, *idx + 1, 1, state);
		ip->i_d.di_nextents--;
		if (cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
					PREV.br_startblock, PREV.br_blockcount,
					&i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
			if ((error = xfs_btree_delete(cur, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
			if ((error = xfs_btree_decrement(cur, 0, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
			if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
				LEFT.br_startblock,
				LEFT.br_blockcount + PREV.br_blockcount,
				LEFT.br_state)))
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Setting all of a previous oldext extent to newext.
		 * The right neighbor is contiguous, the left is not.
		 */
		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(ep,
			PREV.br_blockcount + RIGHT.br_blockcount);
		xfs_bmbt_set_state(ep, newext);
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
		xfs_iext_remove(ip, *idx + 1, 1, state);
		ip->i_d.di_nextents--;
		if (cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff,
					RIGHT.br_startblock,
					RIGHT.br_blockcount, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
			if ((error = xfs_btree_delete(cur, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
			if ((error = xfs_btree_decrement(cur, 0, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
			if ((error = xfs_bmbt_update(cur, new->br_startoff,
				new->br_startblock,
				new->br_blockcount + RIGHT.br_blockcount,
				newext)))
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
		/*
		 * Setting all of a previous oldext extent to newext.
		 * Neither the left nor right neighbors are contiguous with
		 * the new one.
		 */
		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_bmbt_set_state(ep, newext);
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);

		if (cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
					new->br_startblock, new->br_blockcount,
					&i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
			if ((error = xfs_bmbt_update(cur, new->br_startoff,
				new->br_startblock, new->br_blockcount,
				newext)))
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
		/*
		 * Setting the first part of a previous oldext extent to newext.
		 * The left neighbor is contiguous.
		 */
		trace_xfs_bmap_pre_update(ip, *idx - 1, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx - 1),
			LEFT.br_blockcount + new->br_blockcount);
		xfs_bmbt_set_startoff(ep,
			PREV.br_startoff + new->br_blockcount);
		trace_xfs_bmap_post_update(ip, *idx - 1, state, _THIS_IP_);

		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_bmbt_set_startblock(ep,
			new->br_startblock + new->br_blockcount);
		xfs_bmbt_set_blockcount(ep,
			PREV.br_blockcount - new->br_blockcount);
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);

		--*idx;

		if (cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
					PREV.br_startblock, PREV.br_blockcount,
					&i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
			if ((error = xfs_bmbt_update(cur,
				PREV.br_startoff + new->br_blockcount,
				PREV.br_startblock + new->br_blockcount,
				PREV.br_blockcount - new->br_blockcount,
				oldext)))
				goto done;
			if ((error = xfs_btree_decrement(cur, 0, &i)))
				goto done;
			error = xfs_bmbt_update(cur, LEFT.br_startoff,
				LEFT.br_startblock,
				LEFT.br_blockcount + new->br_blockcount,
				LEFT.br_state);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING:
		/*
		 * Setting the first part of a previous oldext extent to newext.
		 * The left neighbor is not contiguous.
		 */
		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		ASSERT(ep && xfs_bmbt_get_state(ep) == oldext);
		xfs_bmbt_set_startoff(ep, new_endoff);
		xfs_bmbt_set_blockcount(ep,
			PREV.br_blockcount - new->br_blockcount);
		xfs_bmbt_set_startblock(ep,
			new->br_startblock + new->br_blockcount);
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);

		xfs_iext_insert(ip, *idx, 1, new, state);
		ip->i_d.di_nextents++;
		if (cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
					PREV.br_startblock, PREV.br_blockcount,
					&i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
			if ((error = xfs_bmbt_update(cur,
				PREV.br_startoff + new->br_blockcount,
				PREV.br_startblock + new->br_blockcount,
				PREV.br_blockcount - new->br_blockcount,
				oldext)))
				goto done;
			cur->bc_rec.b = *new;
			if ((error = xfs_btree_insert(cur, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
		}
		break;

	case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Setting the last part of a previous oldext extent to newext.
		 * The right neighbor is contiguous with the new allocation.
		 */
		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(ep,
			PREV.br_blockcount - new->br_blockcount);
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);

		++*idx;

		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, *idx),
			new->br_startoff, new->br_startblock,
			new->br_blockcount + RIGHT.br_blockcount, newext);
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);

		if (cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
					PREV.br_startblock,
					PREV.br_blockcount, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
			if ((error = xfs_bmbt_update(cur, PREV.br_startoff,
				PREV.br_startblock,
				PREV.br_blockcount - new->br_blockcount,
				oldext)))
				goto done;
			if ((error = xfs_btree_increment(cur, 0, &i)))
				goto done;
			if ((error = xfs_bmbt_update(cur, new->br_startoff,
				new->br_startblock,
				new->br_blockcount + RIGHT.br_blockcount,
				newext)))
				goto done;
		}
		break;

	case BMAP_RIGHT_FILLING:
		/*
		 * Setting the last part of a previous oldext extent to newext.
		 * The right neighbor is not contiguous.
		 */
		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(ep,
			PREV.br_blockcount - new->br_blockcount);
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);

		++*idx;
		xfs_iext_insert(ip, *idx, 1, new, state);

		ip->i_d.di_nextents++;
		if (cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
					PREV.br_startblock, PREV.br_blockcount,
					&i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
			if ((error = xfs_bmbt_update(cur, PREV.br_startoff,
				PREV.br_startblock,
				PREV.br_blockcount - new->br_blockcount,
				oldext)))
				goto done;
			if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
					new->br_startblock, new->br_blockcount,
					&i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 0, done);
			cur->bc_rec.b.br_state = XFS_EXT_NORM;
			if ((error = xfs_btree_insert(cur, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
		}
		break;

	case 0:
		/*
		 * Setting the middle part of a previous oldext extent to
		 * newext.  Contiguity is impossible here.
		 * One extent becomes three extents.
		 */
		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(ep,
			new->br_startoff - PREV.br_startoff);
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);

		r[0] = *new;
		r[1].br_startoff = new_endoff;
		r[1].br_blockcount =
			PREV.br_startoff + PREV.br_blockcount - new_endoff;
		r[1].br_startblock = new->br_startblock + new->br_blockcount;
		r[1].br_state = oldext;

		++*idx;
		xfs_iext_insert(ip, *idx, 2, &r[0], state);

		ip->i_d.di_nextents += 2;
		if (cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
					PREV.br_startblock, PREV.br_blockcount,
					&i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
			/* new right extent - oldext */
			if ((error = xfs_bmbt_update(cur, r[1].br_startoff,
				r[1].br_startblock, r[1].br_blockcount,
				r[1].br_state)))
				goto done;
			/* new left extent - oldext */
			cur->bc_rec.b = PREV;
			cur->bc_rec.b.br_blockcount =
				new->br_startoff - PREV.br_startoff;
			if ((error = xfs_btree_insert(cur, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
			/*
			 * Reset the cursor to the position of the new extent
			 * we are about to insert as we can't trust it after
			 * the previous insert.
			 */
			if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
					new->br_startblock, new->br_blockcount,
					&i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 0, done);
			/* new middle extent - newext */
			cur->bc_rec.b.br_state = new->br_state;
			if ((error = xfs_btree_insert(cur, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
	case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
	case BMAP_LEFT_CONTIG:
	case BMAP_RIGHT_CONTIG:
		/*
		 * These cases are all impossible.
		 */
		ASSERT(0);
	}

	/* convert to a btree if necessary */
	if (xfs_bmap_needs_btree(ip, XFS_DATA_FORK)) {
		int	tmp_logflags;	/* partial log flag return val */

		ASSERT(cur == NULL);
		error = xfs_bmap_extents_to_btree(tp, ip, first, flist, &cur,
				0, &tmp_logflags, XFS_DATA_FORK);
		*logflagsp |= tmp_logflags;
		if (error)
			goto done;
	}

	/* clear out the allocated field, done with it now in any case. */
	if (cur) {
		cur->bc_private.b.allocated = 0;
		*curp = cur;
	}

	xfs_bmap_check_leaf_extents(*curp, ip, XFS_DATA_FORK);
done:
	*logflagsp |= rval;
	return error;
}
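
/*
 * Worked example (illustrative): converting the middle of an unwritten
 * extent [0, 100) to written for a write into [40, 60) takes the
 * "case 0" path above: PREV is trimmed to [0, 40), and two records are
 * inserted, the written [40, 60) and the still-unwritten [60, 100),
 * growing di_nextents by two.
 */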

/*
 * Convert a hole to a delayed allocation.
 */
STATIC void
xfs_bmap_add_extent_hole_delay(
	xfs_inode_t		*ip,	/* incore inode pointer */
	xfs_extnum_t		*idx,	/* extent number to update/insert */
	xfs_bmbt_irec_t		*new)	/* new data to add to file extents */
{
	xfs_ifork_t		*ifp;	/* inode fork pointer */
	xfs_bmbt_irec_t		left;	/* left neighbor extent entry */
	xfs_filblks_t		newlen=0;	/* new indirect size */
	xfs_filblks_t		oldlen=0;	/* old indirect size */
	xfs_bmbt_irec_t		right;	/* right neighbor extent entry */
	int			state;	/* state bits, accessed thru macros */
	xfs_filblks_t		temp=0;	/* temp for indirect calculations */

	ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
	state = 0;
	ASSERT(isnullstartblock(new->br_startblock));

	/*
	 * Check and set flags if this segment has a left neighbor
	 */
	if (*idx > 0) {
		state |= BMAP_LEFT_VALID;
		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx - 1), &left);

		if (isnullstartblock(left.br_startblock))
			state |= BMAP_LEFT_DELAY;
	}

	/*
	 * Check and set flags if the current (right) segment exists.
	 * If it doesn't exist, we're converting the hole at end-of-file.
	 */
	if (*idx < ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t)) {
		state |= BMAP_RIGHT_VALID;
		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx), &right);

		if (isnullstartblock(right.br_startblock))
			state |= BMAP_RIGHT_DELAY;
	}

	/*
	 * Set contiguity flags on the left and right neighbors.
	 * Don't let extents get too large, even if the pieces are contiguous.
	 */
	if ((state & BMAP_LEFT_VALID) && (state & BMAP_LEFT_DELAY) &&
	    left.br_startoff + left.br_blockcount == new->br_startoff &&
	    left.br_blockcount + new->br_blockcount <= MAXEXTLEN)
		state |= BMAP_LEFT_CONTIG;

	if ((state & BMAP_RIGHT_VALID) && (state & BMAP_RIGHT_DELAY) &&
	    new->br_startoff + new->br_blockcount == right.br_startoff &&
	    new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
	    (!(state & BMAP_LEFT_CONTIG) ||
	     (left.br_blockcount + new->br_blockcount +
	      right.br_blockcount <= MAXEXTLEN)))
		state |= BMAP_RIGHT_CONTIG;

	/*
	 * Switch out based on the contiguity flags.
	 */
	switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
		/*
		 * New allocation is contiguous with delayed allocations
		 * on the left and on the right.
		 * Merge all three into a single extent record.
		 */
		--*idx;
		temp = left.br_blockcount + new->br_blockcount +
			right.br_blockcount;

		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx), temp);
		oldlen = startblockval(left.br_startblock) +
			startblockval(new->br_startblock) +
			startblockval(right.br_startblock);
		newlen = xfs_bmap_worst_indlen(ip, temp);
		xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, *idx),
			nullstartblock((int)newlen));
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);

		xfs_iext_remove(ip, *idx + 1, 1, state);
		break;

	case BMAP_LEFT_CONTIG:
		/*
		 * New allocation is contiguous with a delayed allocation
		 * on the left.
		 * Merge the new allocation with the left neighbor.
		 */
		--*idx;
		temp = left.br_blockcount + new->br_blockcount;

		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx), temp);
		oldlen = startblockval(left.br_startblock) +
			startblockval(new->br_startblock);
		newlen = xfs_bmap_worst_indlen(ip, temp);
		xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, *idx),
			nullstartblock((int)newlen));
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
		break;

	case BMAP_RIGHT_CONTIG:
		/*
		 * New allocation is contiguous with a delayed allocation
		 * on the right.
		 * Merge the new allocation with the right neighbor.
		 */
		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		temp = new->br_blockcount + right.br_blockcount;
		oldlen = startblockval(new->br_startblock) +
			startblockval(right.br_startblock);
		newlen = xfs_bmap_worst_indlen(ip, temp);
		xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, *idx),
			new->br_startoff,
			nullstartblock((int)newlen), temp, right.br_state);
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
		break;

	case 0:
		/*
		 * New allocation is not contiguous with another
		 * delayed allocation.
		 * Insert a new entry.
		 */
		oldlen = newlen = 0;
		xfs_iext_insert(ip, *idx, 1, new, state);
		break;
	}
	if (oldlen != newlen) {
		ASSERT(oldlen > newlen);
		xfs_icsb_modify_counters(ip->i_mount, XFS_SBS_FDBLOCKS,
			(int64_t)(oldlen - newlen), 0);
		/*
		 * Nothing to do for disk quota accounting here.
		 */
	}
}
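
/*
 * Worked example (illustrative) of the reservation arithmetic above:
 * merging a new 10-block delalloc with a 20-block left neighbor
 * recomputes one worst-case reservation for the combined 30-block
 * extent; since that single reservation never exceeds the sum of the
 * two it replaces (oldlen > newlen when they differ), the difference
 * is handed back to the free-block counter.
 */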

/*
 * Convert a hole to a real allocation.
 */
STATIC int				/* error */
xfs_bmap_add_extent_hole_real(
	struct xfs_bmalloca	*bma,
	int			whichfork)
{
	struct xfs_bmbt_irec	*new = &bma->got;
	int			error;	/* error return value */
	int			i;	/* temp state */
	xfs_ifork_t		*ifp;	/* inode fork pointer */
	xfs_bmbt_irec_t		left;	/* left neighbor extent entry */
	xfs_bmbt_irec_t		right;	/* right neighbor extent entry */
	int			rval=0;	/* return value (logging flags) */
	int			state;	/* state bits, accessed thru macros */

	ifp = XFS_IFORK_PTR(bma->ip, whichfork);

	ASSERT(bma->idx >= 0);
	ASSERT(bma->idx <= ifp->if_bytes / sizeof(struct xfs_bmbt_rec));
	ASSERT(!isnullstartblock(new->br_startblock));
	ASSERT(!bma->cur ||
	       !(bma->cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL));

	XFS_STATS_INC(xs_add_exlist);

	state = 0;
	if (whichfork == XFS_ATTR_FORK)
		state |= BMAP_ATTRFORK;

	/*
	 * Check and set flags if this segment has a left neighbor.
	 */
	if (bma->idx > 0) {
		state |= BMAP_LEFT_VALID;
		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx - 1), &left);
		if (isnullstartblock(left.br_startblock))
			state |= BMAP_LEFT_DELAY;
	}

	/*
	 * Check and set flags if this segment has a current value.
	 * Not true if we're inserting into the "hole" at eof.
	 */
	if (bma->idx < ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)) {
		state |= BMAP_RIGHT_VALID;
		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx), &right);
		if (isnullstartblock(right.br_startblock))
			state |= BMAP_RIGHT_DELAY;
	}

	/*
	 * We're inserting a real allocation between "left" and "right".
	 * Set the contiguity flags.  Don't let extents get too large.
	 */
	if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
	    left.br_startoff + left.br_blockcount == new->br_startoff &&
	    left.br_startblock + left.br_blockcount == new->br_startblock &&
	    left.br_state == new->br_state &&
	    left.br_blockcount + new->br_blockcount <= MAXEXTLEN)
		state |= BMAP_LEFT_CONTIG;

	if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
	    new->br_startoff + new->br_blockcount == right.br_startoff &&
	    new->br_startblock + new->br_blockcount == right.br_startblock &&
	    new->br_state == right.br_state &&
	    new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
	    (!(state & BMAP_LEFT_CONTIG) ||
	     left.br_blockcount + new->br_blockcount +
	     right.br_blockcount <= MAXEXTLEN))
		state |= BMAP_RIGHT_CONTIG;

	/*
	 * Select which case we're in here, and implement it.
	 */
	switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
		/*
		 * New allocation is contiguous with real allocations on the
		 * left and on the right.
		 * Merge all three into a single extent record.
		 */
		--bma->idx;
		trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx),
			left.br_blockcount + new->br_blockcount +
			right.br_blockcount);
		trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);

		xfs_iext_remove(bma->ip, bma->idx + 1, 1, state);

		XFS_IFORK_NEXT_SET(bma->ip, whichfork,
			XFS_IFORK_NEXTENTS(bma->ip, whichfork) - 1);
		if (bma->cur == NULL) {
			rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
		} else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, right.br_startoff,
					right.br_startblock, right.br_blockcount,
					&i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
			error = xfs_btree_delete(bma->cur, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
			error = xfs_btree_decrement(bma->cur, 0, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
			error = xfs_bmbt_update(bma->cur, left.br_startoff,
					left.br_startblock,
					left.br_blockcount +
						new->br_blockcount +
						right.br_blockcount,
					left.br_state);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_CONTIG:
		/*
		 * New allocation is contiguous with a real allocation
		 * on the left.
		 * Merge the new allocation with the left neighbor.
		 */
		--bma->idx;
		trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx),
			left.br_blockcount + new->br_blockcount);
		trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);

		if (bma->cur == NULL) {
			rval = xfs_ilog_fext(whichfork);
		} else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, left.br_startoff,
					left.br_startblock, left.br_blockcount,
					&i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
			error = xfs_bmbt_update(bma->cur, left.br_startoff,
					left.br_startblock,
					left.br_blockcount +
						new->br_blockcount,
					left.br_state);
			if (error)
				goto done;
		}
		break;

	case BMAP_RIGHT_CONTIG:
		/*
		 * New allocation is contiguous with a real allocation
		 * on the right.
		 * Merge the new allocation with the right neighbor.
		 */
		trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
		xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, bma->idx),
			new->br_startoff, new->br_startblock,
			new->br_blockcount + right.br_blockcount,
			right.br_state);
		trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);

		if (bma->cur == NULL) {
			rval = xfs_ilog_fext(whichfork);
		} else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur,
					right.br_startoff,
					right.br_startblock,
					right.br_blockcount, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
			error = xfs_bmbt_update(bma->cur, new->br_startoff,
					new->br_startblock,
					new->br_blockcount +
						right.br_blockcount,
					right.br_state);
			if (error)
				goto done;
		}
		break;

	case 0:
		/*
		 * New allocation is not contiguous with another
		 * real allocation.
		 * Insert a new entry.
		 */
		xfs_iext_insert(bma->ip, bma->idx, 1, new, state);
		XFS_IFORK_NEXT_SET(bma->ip, whichfork,
			XFS_IFORK_NEXTENTS(bma->ip, whichfork) + 1);
		if (bma->cur == NULL) {
			rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
		} else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur,
					new->br_startoff,
					new->br_startblock,
					new->br_blockcount, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 0, done);
			bma->cur->bc_rec.b.br_state = new->br_state;
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
		}
		break;
	}

	/* convert to a btree if necessary */
	if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
		int	tmp_logflags;	/* partial log flag return val */

		ASSERT(bma->cur == NULL);
		error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
				bma->firstblock, bma->flist, &bma->cur,
				0, &tmp_logflags, whichfork);
		bma->logflags |= tmp_logflags;
		if (error)
			goto done;
	}

	/* clear out the allocated field, done with it now in any case. */
	if (bma->cur)
		bma->cur->bc_private.b.allocated = 0;

	xfs_bmap_check_leaf_extents(bma->cur, bma->ip, whichfork);
done:
	bma->logflags |= rval;
	return error;
}

/*
 * Adjust the size of the new extent based on di_extsize and rt extsize.
 */
STATIC int
xfs_bmap_extsize_align(
	xfs_mount_t	*mp,
	xfs_bmbt_irec_t	*gotp,		/* next extent pointer */
	xfs_bmbt_irec_t	*prevp,		/* previous extent pointer */
	xfs_extlen_t	extsz,		/* align to this extent size */
	int		rt,		/* is this a realtime inode? */
	int		eof,		/* is extent at end-of-file? */
	int		delay,		/* creating delalloc extent? */
	int		convert,	/* overwriting unwritten extent? */
	xfs_fileoff_t	*offp,		/* in/out: aligned offset */
	xfs_extlen_t	*lenp)		/* in/out: aligned length */
{
	xfs_fileoff_t	orig_off;	/* original offset */
	xfs_extlen_t	orig_alen;	/* original length */
	xfs_fileoff_t	orig_end;	/* original off+len */
	xfs_fileoff_t	nexto;		/* next file offset */
	xfs_fileoff_t	prevo;		/* previous file offset */
	xfs_fileoff_t	align_off;	/* temp for offset */
	xfs_extlen_t	align_alen;	/* temp for length */
	xfs_extlen_t	temp;		/* temp for calculations */

	if (convert)
		return 0;

	orig_off = align_off = *offp;
	orig_alen = align_alen = *lenp;
	orig_end = orig_off + orig_alen;

	/*
	 * If this request overlaps an existing extent, then don't
	 * attempt to perform any additional alignment.
	 */
	if (!delay && !eof &&
	    (orig_off >= gotp->br_startoff) &&
	    (orig_end <= gotp->br_startoff + gotp->br_blockcount)) {
		return 0;
	}

	/*
	 * If the file offset is unaligned vs. the extent size
	 * we need to align it.  This will be possible unless
	 * the file was previously written with a kernel that didn't
	 * perform this alignment, or if a truncate shot us in the
	 * foot.
	 */
	temp = do_mod(orig_off, extsz);
	if (temp) {
		align_alen += temp;
		align_off -= temp;
	}
	/*
	 * Same adjustment for the end of the requested area.
	 */
	if ((temp = (align_alen % extsz))) {
		align_alen += extsz - temp;
	}

	/*
	 * If the previous block overlaps with this proposed allocation
	 * then move the start forward without adjusting the length.
	 */
	if (prevp->br_startoff != NULLFILEOFF) {
		if (prevp->br_startblock == HOLESTARTBLOCK)
			prevo = prevp->br_startoff;
		else
			prevo = prevp->br_startoff + prevp->br_blockcount;
	} else
		prevo = 0;
	if (align_off != orig_off && align_off < prevo)
		align_off = prevo;

	/*
	 * If the next block overlaps with this proposed allocation
	 * then move the start back without adjusting the length,
	 * but not before offset 0.
	 * This may of course make the start overlap previous block,
	 * and if we hit the offset 0 limit then the next block
	 * can still overlap too.
	 */
	if (!eof && gotp->br_startoff != NULLFILEOFF) {
		if ((delay && gotp->br_startblock == HOLESTARTBLOCK) ||
		    (!delay && gotp->br_startblock == DELAYSTARTBLOCK))
			nexto = gotp->br_startoff + gotp->br_blockcount;
		else
			nexto = gotp->br_startoff;
	} else
		nexto = NULLFILEOFF;
	if (!eof &&
	    align_off + align_alen != orig_end &&
	    align_off + align_alen > nexto)
		align_off = nexto > align_alen ? nexto - align_alen : 0;
	/*
	 * If we're now overlapping the next or previous extent that
	 * means we can't fit an extsz piece in this hole.  Just move
	 * the start forward to the first valid spot and set
	 * the length so we hit the end.
	 */
	if (align_off != orig_off && align_off < prevo)
		align_off = prevo;
	if (align_off + align_alen != orig_end &&
	    align_off + align_alen > nexto &&
	    nexto != NULLFILEOFF) {
		ASSERT(nexto > prevo);
		align_alen = nexto - align_off;
	}

	/*
	 * If realtime, and the result isn't a multiple of the realtime
	 * extent size we need to remove blocks until it is.
	 */
	if (rt && (temp = (align_alen % mp->m_sb.sb_rextsize))) {
		/*
		 * We're not covering the original request, or
		 * we won't be able to once we fix the length.
		 */
		if (orig_off < align_off ||
		    orig_end > align_off + align_alen ||
		    align_alen - temp < orig_alen)
			return XFS_ERROR(EINVAL);
		/*
		 * Try to fix it by moving the start up.
		 */
		if (align_off + temp <= orig_off) {
			align_alen -= temp;
			align_off += temp;
		}
		/*
		 * Try to fix it by moving the end in.
		 */
		else if (align_off + align_alen - temp >= orig_end)
			align_alen -= temp;
		/*
		 * Set the start to the minimum then trim the length.
		 */
		else {
			align_alen -= orig_off - align_off;
			align_off = orig_off;
			align_alen -= align_alen % mp->m_sb.sb_rextsize;
		}
		/*
		 * Result doesn't cover the request, fail it.
		 */
		if (orig_off < align_off || orig_end > align_off + align_alen)
			return XFS_ERROR(EINVAL);
	} else {
		ASSERT(orig_off >= align_off);
		ASSERT(orig_end <= align_off + align_alen);
	}

#ifdef DEBUG
	if (!eof && gotp->br_startoff != NULLFILEOFF)
		ASSERT(align_off + align_alen <= gotp->br_startoff);
	if (prevp->br_startoff != NULLFILEOFF)
		ASSERT(align_off >= prevp->br_startoff + prevp->br_blockcount);
#endif

	*lenp = align_alen;
	*offp = align_off;
	return 0;
}
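
/*
 * Worked example (illustrative): with extsz = 16 and a request for
 * offset 10, length 4, the code above computes temp = do_mod(10, 16)
 * = 10, giving align_off = 0 and align_alen = 14; the end is then
 * rounded up by 16 - (14 % 16) = 2, so the aligned request becomes
 * [0, 16) and fully covers the original [10, 14).
 */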

#define XFS_ALLOC_GAP_UNITS	4

STATIC void
xfs_bmap_adjacent(
	xfs_bmalloca_t	*ap)		/* bmap alloc argument struct */
{
	xfs_fsblock_t	adjust;		/* adjustment to block numbers */
	xfs_agnumber_t	fb_agno;	/* ag number of ap->firstblock */
	xfs_mount_t	*mp;		/* mount point structure */
	int		nullfb;		/* true if ap->firstblock isn't set */
	int		rt;		/* true if inode is realtime */

#define	ISVALID(x,y)	\
	(rt ? \
		(x) < mp->m_sb.sb_rblocks : \
		XFS_FSB_TO_AGNO(mp, x) == XFS_FSB_TO_AGNO(mp, y) && \
		XFS_FSB_TO_AGNO(mp, x) < mp->m_sb.sb_agcount && \
		XFS_FSB_TO_AGBNO(mp, x) < mp->m_sb.sb_agblocks)

	mp = ap->ip->i_mount;
	nullfb = *ap->firstblock == NULLFSBLOCK;
	rt = XFS_IS_REALTIME_INODE(ap->ip) && ap->userdata;
	fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, *ap->firstblock);
2042 * If allocating at eof, and there's a previous real block,
2043 * try to use its last block as our starting point.
2045 if (ap->eof && ap->prev.br_startoff != NULLFILEOFF &&
2046 !isnullstartblock(ap->prev.br_startblock) &&
2047 ISVALID(ap->prev.br_startblock + ap->prev.br_blockcount,
2048 ap->prev.br_startblock)) {
2049 ap->blkno = ap->prev.br_startblock + ap->prev.br_blockcount;
2051 * Adjust for the gap between prevp and us.
2053 adjust = ap->offset -
2054 (ap->prev.br_startoff + ap->prev.br_blockcount);
2056 ISVALID(ap->blkno + adjust, ap->prev.br_startblock))
2057 ap->blkno += adjust;
2060 * If not at eof, then compare the two neighbor blocks.
2061 * Figure out whether either one gives us a good starting point,
2062 * and pick the better one.
2064 else if (!ap->eof) {
2065 xfs_fsblock_t gotbno; /* right side block number */
2066 xfs_fsblock_t gotdiff=0; /* right side difference */
2067 xfs_fsblock_t prevbno; /* left side block number */
2068 xfs_fsblock_t prevdiff=0; /* left side difference */
2071 * If there's a previous (left) block, select a requested
2072 * start block based on it.
2074 if (ap->prev.br_startoff != NULLFILEOFF &&
2075 !isnullstartblock(ap->prev.br_startblock) &&
2076 (prevbno = ap->prev.br_startblock +
2077 ap->prev.br_blockcount) &&
2078 ISVALID(prevbno, ap->prev.br_startblock)) {
2080 * Calculate gap to end of previous block.
2082 adjust = prevdiff = ap->offset -
2083 (ap->prev.br_startoff +
2084 ap->prev.br_blockcount);
2086 * Figure the startblock based on the previous block's
2087 * end and the gap size.
2089 * If the gap is large relative to the piece we're
2090 * allocating, or using it gives us an invalid block
2091 * number, then just use the end of the previous block.
2093 if (prevdiff <= XFS_ALLOC_GAP_UNITS * ap->length &&
2094 ISVALID(prevbno + prevdiff,
2095 ap->prev.br_startblock))
2100 * If the firstblock forbids it, can't use it,
2103 if (!rt && !nullfb &&
2104 XFS_FSB_TO_AGNO(mp, prevbno) != fb_agno)
2105 prevbno = NULLFSBLOCK;
2108 * No previous block or can't follow it, just default.
2111 prevbno = NULLFSBLOCK;
2113 * If there's a following (right) block, select a requested
2114 * start block based on it.
2116 if (!isnullstartblock(ap->got.br_startblock)) {
2118 * Calculate gap to start of next block.
2120 adjust = gotdiff = ap->got.br_startoff - ap->offset;
2122 * Figure the startblock based on the next block's
2123 * start and the gap size.
2125 gotbno = ap->got.br_startblock;
2128 * If the gap is large relative to the piece we're
2129 * allocating, or using it gives us an invalid block
2130 * number, then just use the start of the next block
2131 * offset by our length.
2133 if (gotdiff <= XFS_ALLOC_GAP_UNITS * ap->length &&
2134 ISVALID(gotbno - gotdiff, gotbno))
2136 else if (ISVALID(gotbno - ap->length, gotbno)) {
2137 gotbno -= ap->length;
2138 gotdiff += adjust - ap->length;
2142 * If the firstblock forbids it, can't use it,
2145 if (!rt && !nullfb &&
2146 XFS_FSB_TO_AGNO(mp, gotbno) != fb_agno)
2147 gotbno = NULLFSBLOCK;
2150 * No next block, just default.
2153 gotbno = NULLFSBLOCK;
2155 * If both valid, pick the better one, else the only good
2156 * one, else ap->blkno is already set (to 0 or the inode block).
2158 if (prevbno != NULLFSBLOCK && gotbno != NULLFSBLOCK)
2159 ap->blkno = prevdiff <= gotdiff ? prevbno : gotbno;
2160 else if (prevbno != NULLFSBLOCK)
2161 ap->blkno = prevbno;
2162 else if (gotbno != NULLFSBLOCK)
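/*
 * Illustrative note (added comment, not part of the original source):
 * with XFS_ALLOC_GAP_UNITS == 4, a gap of up to 4x the allocation length
 * still counts as "close" - e.g. when allocating 8 blocks next to a
 * neighbour that ends 20 blocks away, the start is projected across the
 * gap to keep the file contiguous on disk; a neighbour 40 blocks away is
 * treated as distant and only biases the left/right comparison above.
 */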
STATIC int
xfs_bmap_rtalloc(
	xfs_bmalloca_t	*ap)		/* bmap alloc argument struct */
{
	xfs_alloctype_t	atype = 0;	/* type for allocation routines */
	int		error;		/* error return value */
	xfs_mount_t	*mp;		/* mount point structure */
	xfs_extlen_t	prod = 0;	/* product factor for allocators */
	xfs_extlen_t	ralen = 0;	/* realtime allocation length */
	xfs_extlen_t	align;		/* minimum allocation alignment */
	xfs_rtblock_t	rtb;

	mp = ap->ip->i_mount;
	align = xfs_get_extsz_hint(ap->ip);
	prod = align / mp->m_sb.sb_rextsize;
	error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
					align, 1, ap->eof, 0,
					ap->conv, &ap->offset, &ap->length);
	if (error)
		return error;
	ASSERT(ap->length);
	ASSERT(ap->length % mp->m_sb.sb_rextsize == 0);

	/*
	 * If the offset & length are not perfectly aligned
	 * then kill prod, it will just get us in trouble.
	 */
	if (do_mod(ap->offset, align) || ap->length % align)
		prod = 1;
	/*
	 * Set ralen to be the actual requested length in rtextents.
	 */
	ralen = ap->length / mp->m_sb.sb_rextsize;
	/*
	 * If the old value was close enough to MAXEXTLEN that
	 * we rounded up to it, cut it back so it's valid again.
	 * Note that if it's a really large request (bigger than
	 * MAXEXTLEN), we don't hear about that number, and can't
	 * adjust the starting point to match it.
	 */
	if (ralen * mp->m_sb.sb_rextsize >= MAXEXTLEN)
		ralen = MAXEXTLEN / mp->m_sb.sb_rextsize;

	/*
	 * Lock out other modifications to the RT bitmap inode.
	 */
	xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(ap->tp, mp->m_rbmip, XFS_ILOCK_EXCL);

	/*
	 * If it's an allocation to an empty file at offset 0,
	 * pick an extent that will space things out in the rt area.
	 */
	if (ap->eof && ap->offset == 0) {
		xfs_rtblock_t uninitialized_var(rtx);	/* realtime extent no */

		error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx);
		if (error)
			return error;
		ap->blkno = rtx * mp->m_sb.sb_rextsize;
	} else {
		ap->blkno = 0;
	}

	xfs_bmap_adjacent(ap);

	/*
	 * Realtime allocation, done through xfs_rtallocate_extent.
	 */
	atype = ap->blkno == 0 ? XFS_ALLOCTYPE_ANY_AG : XFS_ALLOCTYPE_NEAR_BNO;
	do_div(ap->blkno, mp->m_sb.sb_rextsize);
	rtb = ap->blkno;
	ap->length = ralen;
	if ((error = xfs_rtallocate_extent(ap->tp, ap->blkno, 1, ap->length,
				&ralen, atype, ap->wasdel, prod, &rtb)))
		return error;
	if (rtb == NULLFSBLOCK && prod > 1 &&
	    (error = xfs_rtallocate_extent(ap->tp, ap->blkno, 1,
					   ap->length, &ralen, atype,
					   ap->wasdel, 1, &rtb)))
		return error;
	ap->blkno = rtb;
	if (ap->blkno != NULLFSBLOCK) {
		ap->blkno *= mp->m_sb.sb_rextsize;
		ralen *= mp->m_sb.sb_rextsize;
		ap->length = ralen;
		ap->ip->i_d.di_nblocks += ralen;
		xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
		if (ap->wasdel)
			ap->ip->i_delayed_blks -= ralen;
		/*
		 * Adjust the disk quota also. This was reserved
		 * earlier.
		 */
		xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
			ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT :
					XFS_TRANS_DQ_RTBCOUNT, (long) ralen);
	} else {
		ap->length = 0;
	}
	return 0;
}
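/*
 * Illustrative note (added comment, not part of the original source):
 * xfs_bmap_rtalloc converts between filesystem blocks and realtime
 * extents throughout.  With sb_rextsize = 4 blocks, a 64-block request
 * becomes ralen = 16 rtextents on the way into xfs_rtallocate_extent(),
 * and a successful result is scaled back up (blkno and ralen multiplied
 * by 4) before updating ap->length, di_nblocks and the quota counters.
 */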
STATIC int
xfs_bmap_btalloc_nullfb(
	struct xfs_bmalloca	*ap,
	struct xfs_alloc_arg	*args,
	xfs_extlen_t		*blen)
{
	struct xfs_mount	*mp = ap->ip->i_mount;
	struct xfs_perag	*pag;
	xfs_agnumber_t		ag, startag;
	int			notinit = 0;
	int			error;

	if (ap->userdata && xfs_inode_is_filestream(ap->ip))
		args->type = XFS_ALLOCTYPE_NEAR_BNO;
	else
		args->type = XFS_ALLOCTYPE_START_BNO;
	args->total = ap->total;

	/*
	 * Search for an allocation group with a single extent large enough
	 * for the request.  If one isn't found, then adjust the minimum
	 * allocation size to the largest space found.
	 */
	startag = ag = XFS_FSB_TO_AGNO(mp, args->fsbno);
	if (startag == NULLAGNUMBER)
		startag = ag = 0;

	pag = xfs_perag_get(mp, ag);
	while (*blen < args->maxlen) {
		if (!pag->pagf_init) {
			error = xfs_alloc_pagf_init(mp, args->tp, ag,
						    XFS_ALLOC_FLAG_TRYLOCK);
			if (error) {
				xfs_perag_put(pag);
				return error;
			}
		}

		/*
		 * See xfs_alloc_fix_freelist...
		 */
		if (pag->pagf_init) {
			xfs_extlen_t	longest;
			longest = xfs_alloc_longest_free_extent(mp, pag);
			if (*blen < longest)
				*blen = longest;
		} else
			notinit = 1;

		if (xfs_inode_is_filestream(ap->ip)) {
			if (*blen >= args->maxlen)
				break;

			if (ap->userdata) {
				/*
				 * If startag is an invalid AG, we've
				 * come here once before and
				 * xfs_filestream_new_ag picked the
				 * best currently available.
				 *
				 * Don't continue looping, since we
				 * could loop forever.
				 */
				if (startag == NULLAGNUMBER)
					break;

				error = xfs_filestream_new_ag(ap, &ag);
				xfs_perag_put(pag);
				if (error)
					return error;

				/* loop again to set 'blen'*/
				startag = NULLAGNUMBER;
				pag = xfs_perag_get(mp, ag);
				continue;
			}
		}
		if (++ag == mp->m_sb.sb_agcount)
			ag = 0;
		if (ag == startag)
			break;
		xfs_perag_put(pag);
		pag = xfs_perag_get(mp, ag);
	}
	xfs_perag_put(pag);

	/*
	 * Since the above loop did a BUF_TRYLOCK, it is
	 * possible that there is space for this request.
	 */
	if (notinit || *blen < ap->minlen)
		args->minlen = ap->minlen;
	/*
	 * If the best seen length is less than the request
	 * length, use the best as the minimum.
	 */
	else if (*blen < args->maxlen)
		args->minlen = *blen;
	/*
	 * Otherwise we've seen an extent as big as maxlen,
	 * use that as the minimum.
	 */
	else
		args->minlen = args->maxlen;

	/*
	 * set the failure fallback case to look in the selected
	 * AG as the stream may have moved.
	 */
	if (xfs_inode_is_filestream(ap->ip))
		ap->blkno = args->fsbno = XFS_AGB_TO_FSB(mp, ag, 0);

	return 0;
}
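/*
 * Illustrative note (added comment, not part of the original source):
 * because the AG scan above only trylocks each AGF, *blen is a lower
 * bound on the largest free extent actually available.  The minlen
 * fixups at the end make sure the subsequent xfs_alloc_vextent() call
 * never demands more than was observed, while still honouring the
 * caller's ap->minlen floor.
 */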
STATIC int
xfs_bmap_btalloc(
	xfs_bmalloca_t	*ap)		/* bmap alloc argument struct */
{
	xfs_mount_t	*mp;		/* mount point structure */
	xfs_alloctype_t	atype = 0;	/* type for allocation routines */
	xfs_extlen_t	align;		/* minimum allocation alignment */
	xfs_agnumber_t	fb_agno;	/* ag number of ap->firstblock */
	xfs_agnumber_t	ag;
	xfs_alloc_arg_t	args;
	xfs_extlen_t	blen;
	xfs_extlen_t	nextminlen = 0;
	int		nullfb;		/* true if ap->firstblock isn't set */
	int		isaligned;
	int		tryagain;
	int		error;

	mp = ap->ip->i_mount;
	align = ap->userdata ? xfs_get_extsz_hint(ap->ip) : 0;
	if (unlikely(align)) {
		error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
						align, 0, ap->eof, 0, ap->conv,
						&ap->offset, &ap->length);
		ASSERT(!error);
		ASSERT(ap->length);
	}
	nullfb = *ap->firstblock == NULLFSBLOCK;
	fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, *ap->firstblock);
	if (nullfb) {
		if (ap->userdata && xfs_inode_is_filestream(ap->ip)) {
			ag = xfs_filestream_lookup_ag(ap->ip);
			ag = (ag != NULLAGNUMBER) ? ag : 0;
			ap->blkno = XFS_AGB_TO_FSB(mp, ag, 0);
		} else {
			ap->blkno = XFS_INO_TO_FSB(mp, ap->ip->i_ino);
		}
	} else
		ap->blkno = *ap->firstblock;

	xfs_bmap_adjacent(ap);

	/*
	 * If allowed, use ap->blkno; otherwise must use firstblock since
	 * it's in the right allocation group.
	 */
	if (nullfb || XFS_FSB_TO_AGNO(mp, ap->blkno) == fb_agno)
		;
	else
		ap->blkno = *ap->firstblock;
	/*
	 * Normal allocation, done through xfs_alloc_vextent.
	 */
	tryagain = isaligned = 0;
	args.tp = ap->tp;
	args.mp = mp;
	args.fsbno = ap->blkno;

	/* Trim the allocation back to the maximum an AG can fit. */
	args.maxlen = MIN(ap->length, XFS_ALLOC_AG_MAX_USABLE(mp));
	args.firstblock = *ap->firstblock;
	blen = 0;
	if (nullfb) {
		error = xfs_bmap_btalloc_nullfb(ap, &args, &blen);
		if (error)
			return error;
	} else if (ap->flist->xbf_low) {
		if (xfs_inode_is_filestream(ap->ip))
			args.type = XFS_ALLOCTYPE_FIRST_AG;
		else
			args.type = XFS_ALLOCTYPE_START_BNO;
		args.total = args.minlen = ap->minlen;
	} else {
		args.type = XFS_ALLOCTYPE_NEAR_BNO;
		args.total = ap->total;
		args.minlen = ap->minlen;
	}
	/* apply extent size hints if obtained earlier */
	if (unlikely(align)) {
		args.prod = align;
		if ((args.mod = (xfs_extlen_t)do_mod(ap->offset, args.prod)))
			args.mod = (xfs_extlen_t)(args.prod - args.mod);
	} else if (mp->m_sb.sb_blocksize >= PAGE_CACHE_SIZE) {
		args.prod = 1;
		args.mod = 0;
	} else {
		args.prod = PAGE_CACHE_SIZE >> mp->m_sb.sb_blocklog;
		if ((args.mod = (xfs_extlen_t)(do_mod(ap->offset, args.prod))))
			args.mod = (xfs_extlen_t)(args.prod - args.mod);
	}
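	/*
	 * Illustrative example (added comment, not part of the original
	 * source): with 4k pages and 1k filesystem blocks, args.prod is 4,
	 * and a request at file offset 6 blocks yields
	 * args.mod = 4 - (6 % 4) = 2, so an allocated length satisfying
	 * len % prod == mod makes the extent end on a page boundary.
	 */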
	/*
	 * If we are not low on available data blocks, and the
	 * underlying logical volume manager is a stripe, and
	 * the file offset is zero then try to allocate data
	 * blocks on stripe unit boundary.
	 * NOTE: ap->aeof is only set if the allocation length
	 * is >= the stripe unit and the allocation offset is
	 * at the end of file.
	 */
	if (!ap->flist->xbf_low && ap->aeof) {
		if (!ap->offset) {
			args.alignment = mp->m_dalign;
			atype = args.type;
			isaligned = 1;
			/*
			 * Adjust for alignment
			 */
			if (blen > args.alignment && blen <= args.maxlen)
				args.minlen = blen - args.alignment;
			args.minalignslop = 0;
		} else {
			/*
			 * First try an exact bno allocation.
			 * If it fails then do a near or start bno
			 * allocation with alignment turned on.
			 */
			atype = args.type;
			tryagain = 1;
			args.type = XFS_ALLOCTYPE_THIS_BNO;
			args.alignment = 1;
			/*
			 * Compute the minlen+alignment for the
			 * next case.  Set slop so that the value
			 * of minlen+alignment+slop doesn't go up
			 * between the calls.
			 */
			if (blen > mp->m_dalign && blen <= args.maxlen)
				nextminlen = blen - mp->m_dalign;
			else
				nextminlen = args.minlen;
			if (nextminlen + mp->m_dalign > args.minlen + 1)
				args.minalignslop =
					nextminlen + mp->m_dalign -
					args.minlen - 1;
			else
				args.minalignslop = 0;
		}
	} else {
		args.alignment = 1;
		args.minalignslop = 0;
	}
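	/*
	 * Illustrative example (added comment, not part of the original
	 * source): with a stripe unit m_dalign = 8 and args.minlen = 4,
	 * the exact-bno pass above sets nextminlen = 4 (when blen gives no
	 * better bound) and minalignslop = 4 + 8 - 4 - 1 = 7, so the space
	 * reserved for minlen+alignment+slop in the first call is never
	 * smaller than what the aligned retry below may need.
	 */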
	args.minleft = ap->minleft;
	args.wasdel = ap->wasdel;
	args.isfl = 0;
	args.userdata = ap->userdata;
	if ((error = xfs_alloc_vextent(&args)))
		return error;
	if (tryagain && args.fsbno == NULLFSBLOCK) {
		/*
		 * Exact allocation failed. Now try with alignment
		 * turned on.
		 */
		args.type = atype;
		args.fsbno = ap->blkno;
		args.alignment = mp->m_dalign;
		args.minlen = nextminlen;
		args.minalignslop = 0;
		isaligned = 1;
		if ((error = xfs_alloc_vextent(&args)))
			return error;
	}
	if (isaligned && args.fsbno == NULLFSBLOCK) {
		/*
		 * allocation failed, so turn off alignment and
		 * try again.
		 */
		args.type = atype;
		args.fsbno = ap->blkno;
		args.alignment = 0;
		if ((error = xfs_alloc_vextent(&args)))
			return error;
	}
	if (args.fsbno == NULLFSBLOCK && nullfb &&
	    args.minlen > ap->minlen) {
		args.minlen = ap->minlen;
		args.type = XFS_ALLOCTYPE_START_BNO;
		args.fsbno = ap->blkno;
		if ((error = xfs_alloc_vextent(&args)))
			return error;
	}
	if (args.fsbno == NULLFSBLOCK && nullfb) {
		args.fsbno = 0;
		args.type = XFS_ALLOCTYPE_FIRST_AG;
		args.total = ap->minlen;
		args.minleft = 0;
		if ((error = xfs_alloc_vextent(&args)))
			return error;
		ap->flist->xbf_low = 1;
	}
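	/*
	 * Descriptive summary (added comment, not in the original source):
	 * the calls above form a fallback ladder - exact bno, then aligned,
	 * then unaligned, then with the caller's smaller minlen, and
	 * finally anywhere in the filesystem with xbf_low set so later
	 * allocations in this transaction know space is tight.
	 */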
	if (args.fsbno != NULLFSBLOCK) {
		/*
		 * check the allocation happened at the same or higher AG than
		 * the first block that was allocated.
		 */
		ASSERT(*ap->firstblock == NULLFSBLOCK ||
		       XFS_FSB_TO_AGNO(mp, *ap->firstblock) ==
		       XFS_FSB_TO_AGNO(mp, args.fsbno) ||
		       (ap->flist->xbf_low &&
			XFS_FSB_TO_AGNO(mp, *ap->firstblock) <
			XFS_FSB_TO_AGNO(mp, args.fsbno)));

		ap->blkno = args.fsbno;
		if (*ap->firstblock == NULLFSBLOCK)
			*ap->firstblock = args.fsbno;
		ASSERT(nullfb || fb_agno == args.agno ||
		       (ap->flist->xbf_low && fb_agno < args.agno));
		ap->length = args.len;
		ap->ip->i_d.di_nblocks += args.len;
		xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
		if (ap->wasdel)
			ap->ip->i_delayed_blks -= args.len;
		/*
		 * Adjust the disk quota also. This was reserved
		 * earlier.
		 */
		xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
			ap->wasdel ? XFS_TRANS_DQ_DELBCOUNT :
					XFS_TRANS_DQ_BCOUNT,
			(long) args.len);
	} else {
		ap->blkno = NULLFSBLOCK;
		ap->length = 0;
	}
	return 0;
}
/*
 * xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file.
 * It figures out where to ask the underlying allocator to put the new extent.
 */
STATIC int
xfs_bmap_alloc(
	xfs_bmalloca_t	*ap)		/* bmap alloc argument struct */
{
	if (XFS_IS_REALTIME_INODE(ap->ip) && ap->userdata)
		return xfs_bmap_rtalloc(ap);
	return xfs_bmap_btalloc(ap);
}
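/*
 * Descriptive note (added comment, not in the original source): only user
 * data allocations in a realtime inode go to the realtime allocator;
 * metadata for the same inode (e.g. bmap btree blocks) always comes from
 * the normal, AG-managed data device via xfs_bmap_btalloc().
 */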
/*
 * Transform a btree format file with only one leaf node, where the
 * extents list will fit in the inode, into an extents format file.
 * Since the file extents are already in-core, all we have to do is
 * give up the space for the btree root and pitch the leaf block.
 */
STATIC int				/* error */
xfs_bmap_btree_to_extents(
	xfs_trans_t		*tp,	/* transaction pointer */
	xfs_inode_t		*ip,	/* incore inode pointer */
	xfs_btree_cur_t		*cur,	/* btree cursor */
	int			*logflagsp, /* inode logging flags */
	int			whichfork)  /* data or attr fork */
{
	/* REFERENCED */
	struct xfs_btree_block	*cblock;/* child btree block */
	xfs_fsblock_t		cbno;	/* child block number */
	xfs_buf_t		*cbp;	/* child block's buffer */
	int			error;	/* error return value */
	xfs_ifork_t		*ifp;	/* inode fork data */
	xfs_mount_t		*mp;	/* mount point structure */
	__be64			*pp;	/* ptr to block address */
	struct xfs_btree_block	*rblock;/* root btree block */

	mp = ip->i_mount;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(ifp->if_flags & XFS_IFEXTENTS);
	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
	rblock = ifp->if_broot;
	ASSERT(be16_to_cpu(rblock->bb_level) == 1);
	ASSERT(be16_to_cpu(rblock->bb_numrecs) == 1);
	ASSERT(xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0) == 1);
	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, ifp->if_broot_bytes);
	cbno = be64_to_cpu(*pp);
	*logflagsp = 0;
#ifdef DEBUG
	if ((error = xfs_btree_check_lptr(cur, cbno, 1)))
		return error;
#endif
	if ((error = xfs_btree_read_bufl(mp, tp, cbno, 0, &cbp,
			XFS_BMAP_BTREE_REF)))
		return error;
	cblock = XFS_BUF_TO_BLOCK(cbp);
	if ((error = xfs_btree_check_block(cur, cblock, 0, cbp)))
		return error;
	xfs_bmap_add_free(cbno, 1, cur->bc_private.b.flist, mp);
	ip->i_d.di_nblocks--;
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
	xfs_trans_binval(tp, cbp);
	if (cur->bc_bufs[0] == cbp)
		cur->bc_bufs[0] = NULL;
	xfs_iroot_realloc(ip, -1, whichfork);
	ASSERT(ifp->if_broot == NULL);
	ASSERT((ifp->if_flags & XFS_IFBROOT) == 0);
	XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
	*logflagsp = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
	return 0;
}
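/*
 * Descriptive note (added comment, not in the original source): this is
 * the inverse of the extents-to-btree conversion; it fires when deletions
 * shrink a one-level btree far enough that the remaining extent records
 * fit back into the inode fork, freeing the single leaf block and saving
 * one level of indirection on every subsequent mapping lookup.
 */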
/*
 * Called by xfs_bmapi to update file extent records and the btree
 * after removing space (or undoing a delayed allocation).
 */
STATIC int				/* error */
xfs_bmap_del_extent(
	xfs_inode_t		*ip,	/* incore inode pointer */
	xfs_trans_t		*tp,	/* current transaction pointer */
	xfs_extnum_t		*idx,	/* extent number to update/delete */
	xfs_bmap_free_t		*flist,	/* list of extents to be freed */
	xfs_btree_cur_t		*cur,	/* if null, not a btree */
	xfs_bmbt_irec_t		*del,	/* data to remove from extents */
	int			*logflagsp, /* inode logging flags */
	int			whichfork) /* data or attr fork */
{
	xfs_filblks_t		da_new;	/* new delay-alloc indirect blocks */
	xfs_filblks_t		da_old;	/* old delay-alloc indirect blocks */
	xfs_fsblock_t		del_endblock=0;	/* first block past del */
	xfs_fileoff_t		del_endoff;	/* first offset past del */
	int			delay;	/* current block is delayed allocated */
	int			do_fx;	/* free extent at end of routine */
	xfs_bmbt_rec_host_t	*ep;	/* current extent entry pointer */
	int			error;	/* error return value */
	int			flags;	/* inode logging flags */
	xfs_bmbt_irec_t		got;	/* current extent entry */
	xfs_fileoff_t		got_endoff;	/* first offset past got */
	int			i;	/* temp state */
	xfs_ifork_t		*ifp;	/* inode fork pointer */
	xfs_mount_t		*mp;	/* mount structure */
	xfs_filblks_t		nblks;	/* quota/sb block count */
	xfs_bmbt_irec_t		new;	/* new record to be inserted */
	/* REFERENCED */
	uint			qfield;	/* quota field to update */
	xfs_filblks_t		temp;	/* for indirect length calculations */
	xfs_filblks_t		temp2;	/* for indirect length calculations */
	int			state = 0;

	XFS_STATS_INC(xs_del_exlist);

	if (whichfork == XFS_ATTR_FORK)
		state |= BMAP_ATTRFORK;

	mp = ip->i_mount;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT((*idx >= 0) && (*idx < ifp->if_bytes /
		(uint)sizeof(xfs_bmbt_rec_t)));
	ASSERT(del->br_blockcount > 0);
	ep = xfs_iext_get_ext(ifp, *idx);
	xfs_bmbt_get_all(ep, &got);
	ASSERT(got.br_startoff <= del->br_startoff);
	del_endoff = del->br_startoff + del->br_blockcount;
	got_endoff = got.br_startoff + got.br_blockcount;
	ASSERT(got_endoff >= del_endoff);
	delay = isnullstartblock(got.br_startblock);
	ASSERT(isnullstartblock(del->br_startblock) == delay);
	flags = 0;
	qfield = 0;
	error = 0;
	/*
	 * If deleting a real allocation, must free up the disk space.
	 */
	if (!delay) {
		flags = XFS_ILOG_CORE;
		/*
		 * Realtime allocation.  Free it and record di_nblocks update.
		 */
		if (whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip)) {
			xfs_fsblock_t	bno;
			xfs_filblks_t	len;

			ASSERT(do_mod(del->br_blockcount,
				      mp->m_sb.sb_rextsize) == 0);
			ASSERT(do_mod(del->br_startblock,
				      mp->m_sb.sb_rextsize) == 0);
			bno = del->br_startblock;
			len = del->br_blockcount;
			do_div(bno, mp->m_sb.sb_rextsize);
			do_div(len, mp->m_sb.sb_rextsize);
			error = xfs_rtfree_extent(tp, bno, (xfs_extlen_t)len);
			if (error)
				goto done;
			do_fx = 0;
			nblks = len * mp->m_sb.sb_rextsize;
			qfield = XFS_TRANS_DQ_RTBCOUNT;
		}
		/*
		 * Ordinary allocation.
		 */
		else {
			do_fx = 1;
			nblks = del->br_blockcount;
			qfield = XFS_TRANS_DQ_BCOUNT;
		}
		/*
		 * Set up del_endblock and cur for later.
		 */
		del_endblock = del->br_startblock + del->br_blockcount;
		if (cur) {
			if ((error = xfs_bmbt_lookup_eq(cur, got.br_startoff,
					got.br_startblock, got.br_blockcount,
					&i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
		}
		da_old = da_new = 0;
	} else {
		da_old = startblockval(got.br_startblock);
		da_new = 0;
		nblks = 0;
		do_fx = 0;
	}
	/*
	 * Set flag value to use in switch statement.
	 * Left-contig is 2, right-contig is 1.
	 */
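	/*
	 * Illustrative note (added comment, not part of the original
	 * source): the switch value below encodes which ends of 'got' the
	 * deleted range 'del' touches - 3 means both (delete the whole
	 * record), 2 means the deletion starts at got.br_startoff (trim
	 * the front), 1 means it ends at got_endoff (trim the back), and
	 * 0 means it is interior, splitting 'got' into two records.
	 */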
	switch (((got.br_startoff == del->br_startoff) << 1) |
		(got_endoff == del_endoff)) {
	case 3:
		/*
		 * Matches the whole extent.  Delete the entry.
		 */
		xfs_iext_remove(ip, *idx, 1,
				whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0);
		--*idx;
		if (delay)
			break;

		XFS_IFORK_NEXT_SET(ip, whichfork,
			XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
		flags |= XFS_ILOG_CORE;
		if (!cur) {
			flags |= xfs_ilog_fext(whichfork);
			break;
		}
		if ((error = xfs_btree_delete(cur, &i)))
			goto done;
		XFS_WANT_CORRUPTED_GOTO(i == 1, done);
		break;

	case 2:
		/*
		 * Deleting the first part of the extent.
		 */
		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_bmbt_set_startoff(ep, del_endoff);
		temp = got.br_blockcount - del->br_blockcount;
		xfs_bmbt_set_blockcount(ep, temp);
		if (delay) {
			temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
				da_old);
			xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
			trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
			da_new = temp;
			break;
		}
		xfs_bmbt_set_startblock(ep, del_endblock);
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
		if (!cur) {
			flags |= xfs_ilog_fext(whichfork);
			break;
		}
		if ((error = xfs_bmbt_update(cur, del_endoff, del_endblock,
				got.br_blockcount - del->br_blockcount,
				got.br_state)))
			goto done;
		break;

	case 1:
		/*
		 * Deleting the last part of the extent.
		 */
		temp = got.br_blockcount - del->br_blockcount;
		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(ep, temp);
		if (delay) {
			temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
				da_old);
			xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
			trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
			da_new = temp;
			break;
		}
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
		if (!cur) {
			flags |= xfs_ilog_fext(whichfork);
			break;
		}
		if ((error = xfs_bmbt_update(cur, got.br_startoff,
				got.br_startblock,
				got.br_blockcount - del->br_blockcount,
				got.br_state)))
			goto done;
		break;

	case 0:
		/*
		 * Deleting the middle of the extent.
		 */
		temp = del->br_startoff - got.br_startoff;
		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(ep, temp);
		new.br_startoff = del_endoff;
		temp2 = got_endoff - del_endoff;
		new.br_blockcount = temp2;
		new.br_state = got.br_state;
		if (!delay) {
			new.br_startblock = del_endblock;
			flags |= XFS_ILOG_CORE;
			if (cur) {
				if ((error = xfs_bmbt_update(cur,
						got.br_startoff,
						got.br_startblock, temp,
						got.br_state)))
					goto done;
				if ((error = xfs_btree_increment(cur, 0, &i)))
					goto done;
				cur->bc_rec.b = new;
				error = xfs_btree_insert(cur, &i);
				if (error && error != ENOSPC)
					goto done;
				/*
				 * If get no-space back from btree insert,
				 * it tried a split, and we have a zero
				 * block reservation.
				 * Fix up our state and return the error.
				 */
				if (error == ENOSPC) {
					/*
					 * Reset the cursor, don't trust
					 * it after any insert operation.
					 */
					if ((error = xfs_bmbt_lookup_eq(cur,
							got.br_startoff,
							got.br_startblock,
							temp, &i)))
						goto done;
					XFS_WANT_CORRUPTED_GOTO(i == 1, done);
					/*
					 * Update the btree record back
					 * to the original value.
					 */
					if ((error = xfs_bmbt_update(cur,
							got.br_startoff,
							got.br_startblock,
							got.br_blockcount,
							got.br_state)))
						goto done;
					/*
					 * Reset the extent record back
					 * to the original value.
					 */
					xfs_bmbt_set_blockcount(ep,
						got.br_blockcount);
					flags = 0;
					error = XFS_ERROR(ENOSPC);
					goto done;
				}
				XFS_WANT_CORRUPTED_GOTO(i == 1, done);
			} else
				flags |= xfs_ilog_fext(whichfork);
			XFS_IFORK_NEXT_SET(ip, whichfork,
				XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
		} else {
			ASSERT(whichfork == XFS_DATA_FORK);
			temp = xfs_bmap_worst_indlen(ip, temp);
			xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
			temp2 = xfs_bmap_worst_indlen(ip, temp2);
			new.br_startblock = nullstartblock((int)temp2);
			da_new = temp + temp2;
			while (da_new > da_old) {
				if (temp) {
					temp--;
					da_new--;
					xfs_bmbt_set_startblock(ep,
						nullstartblock((int)temp));
				}