[PATCH 3/4] xfs: refactor delalloc indlen reservation split into helper
Brian Foster
bfoster at redhat.com
Fri Mar 4 08:40:30 CST 2016
The delayed allocation indirect reservation splitting code is not
sufficient in some cases where a delalloc extent is split in two. In
preparation for enhancements to this code, refactor the current indlen
distribution algorithm into a new helper function.
Signed-off-by: Brian Foster <bfoster at redhat.com>
---
fs/xfs/libxfs/xfs_bmap.c | 77 ++++++++++++++++++++++++++++++++++++------------
1 file changed, 58 insertions(+), 19 deletions(-)
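
A quick illustration of the distribution logic this patch moves into the
helper (the numbers are made up for the example): if the original extent
carried an indlen reservation of 4 blocks (ores == 4) and
xfs_bmap_worst_indlen() reports a worst case of 3 blocks for each of the
two post-split extents, the skim loop trims one block from each side in
turn -- 3+3 -> 2+3 -> 2+2 -- and stops once the total fits the original
reservation, returning *indlen1 == 2 and *indlen2 == 2.
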
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index f57a9e9..27e6689 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -4721,6 +4721,51 @@ error0:
 }
 
 /*
+ * When a delalloc extent is split (e.g., due to a hole punch), the original
+ * indlen reservation must be shared across the two new extents that are left
+ * behind.
+ *
+ * Given the original reservation and the worst case indlen for the two new
+ * extents (as calculated by xfs_bmap_worst_indlen()), split the original
+ * reservation fairly across the two new extents.
+ */
+static void
+xfs_bmap_split_indlen(
+        xfs_filblks_t   ores,           /* original res. */
+        xfs_filblks_t   *indlen1,       /* ext1 worst indlen */
+        xfs_filblks_t   *indlen2)       /* ext2 worst indlen */
+{
+        xfs_filblks_t   nres;           /* new total res. */
+        xfs_filblks_t   temp;
+        xfs_filblks_t   temp2;
+
+        temp = *indlen1;
+        temp2 = *indlen2;
+        nres = temp + temp2;
+
+        /*
+         * The only blocks available are those reserved for the original extent.
+         * Therefore, we have to skim blocks off each of the new reservations so
+         * long as the new total reservation is greater than the original.
+         */
+        while (nres > ores) {
+                if (temp) {
+                        temp--;
+                        nres--;
+                }
+                if (nres == ores)
+                        break;
+                if (temp2) {
+                        temp2--;
+                        nres--;
+                }
+        }
+
+        *indlen1 = temp;
+        *indlen2 = temp2;
+}
+
+/*
  * Called by xfs_bmapi to update file extent records and the btree
  * after removing space (or undoing a delayed allocation).
  */
@@ -4985,27 +5030,21 @@ xfs_bmap_del_extent(
                                 XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
                 } else {
                         ASSERT(whichfork == XFS_DATA_FORK);
-                        temp = xfs_bmap_worst_indlen(ip, temp);
+
+                        /*
+                         * Distribute the original indlen reservation across the
+                         * two new extents.
+                         */
+                        temp = xfs_bmap_worst_indlen(ip, got.br_blockcount);
+                        temp2 = xfs_bmap_worst_indlen(ip, new.br_blockcount);
+                        xfs_bmap_split_indlen(da_old, &temp, &temp2);
+                        da_new = temp + temp2;
+
+                        /*
+                         * Set the reservation for each extent.
+                         */
                         xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
-                        temp2 = xfs_bmap_worst_indlen(ip, temp2);
                         new.br_startblock = nullstartblock((int)temp2);
-                        da_new = temp + temp2;
-                        while (da_new > da_old) {
-                                if (temp) {
-                                        temp--;
-                                        da_new--;
-                                        xfs_bmbt_set_startblock(ep,
-                                                nullstartblock((int)temp));
-                                }
-                                if (da_new == da_old)
-                                        break;
-                                if (temp2) {
-                                        temp2--;
-                                        da_new--;
-                                        new.br_startblock =
-                                                nullstartblock((int)temp2);
-                                }
-                        }
                 }
                 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
                 xfs_iext_insert(ip, *idx + 1, 1, &new, state);
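
For anyone who wants to poke at the distribution logic outside the kernel,
below is a minimal standalone sketch of the same skim loop. It is only a
sketch, not the kernel code: xfs_filblks_t is stood in for by a plain
uint64_t, the split_indlen() name is made up, and the values in main() are
example numbers.

#include <stdint.h>
#include <stdio.h>

/* userspace stand-in for the kernel's xfs_filblks_t */
typedef uint64_t filblks_t;

/*
 * Trim the two worst-case indlen estimates until their sum fits within
 * the original reservation, skimming one block from each side in turn
 * (mirrors the loop in xfs_bmap_split_indlen() above).
 */
static void
split_indlen(filblks_t ores, filblks_t *indlen1, filblks_t *indlen2)
{
        filblks_t len1 = *indlen1;
        filblks_t len2 = *indlen2;
        filblks_t nres = len1 + len2;

        while (nres > ores) {
                if (len1) {
                        len1--;
                        nres--;
                }
                if (nres == ores)
                        break;
                if (len2) {
                        len2--;
                        nres--;
                }
        }

        *indlen1 = len1;
        *indlen2 = len2;
}

int main(void)
{
        filblks_t i1 = 3, i2 = 3;       /* worst-case indlen of each new extent */

        split_indlen(4, &i1, &i2);      /* original reservation: 4 blocks */
        printf("indlen1=%llu indlen2=%llu\n",
               (unsigned long long)i1, (unsigned long long)i2);
        return 0;
}

With ores == 4 and both estimates at 3, this prints "indlen1=2 indlen2=2",
matching the worked example above.
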
--
2.4.3