xfs
[Top] [All Lists]

[PATCH 2/9] xfs: move DIO mapping size calculation

To: xfs@xxxxxxxxxxx
Subject: [PATCH 2/9] xfs: move DIO mapping size calculation
From: Dave Chinner <david@xxxxxxxxxxxxx>
Date: Wed, 15 Apr 2015 14:51:45 +1000
Delivered-to: xfs@xxxxxxxxxxx
In-reply-to: <1429073512-20035-1-git-send-email-david@xxxxxxxxxxxxx>
References: <1429073512-20035-1-git-send-email-david@xxxxxxxxxxxxx>
From: Dave Chinner <dchinner@xxxxxxxxxx>

The mapping size calculation is done last in __xfs_get_blocks(), but
we are going to need the actual mapping size we will use to map the
direct IO correctly in xfs_map_direct(). Factor out the calculation
for code clarity, and move the call to be the first operation in
mapping the extent to the returned buffer.

Signed-off-by: Dave Chinner <dchinner@xxxxxxxxxx>
Reviewed-by: Brian Foster <bfoster@xxxxxxxxxx>
---
 fs/xfs/xfs_aops.c | 79 ++++++++++++++++++++++++++++++++-----------------------
 1 file changed, 46 insertions(+), 33 deletions(-)

diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 489ed20..4a29399 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -1249,6 +1249,47 @@ xfs_map_direct(
        }
 }
 
+
+/*
+ * If this is O_DIRECT or the mpage code calling tell them how large the 
mapping
+ * is, so that we can avoid repeated get_blocks calls.
+ *
+ * If the mapping spans EOF, then we have to break the mapping up as the mapping
+ * for blocks beyond EOF must be marked new so that sub block regions can be
+ * correctly zeroed. We can't do this for mappings within EOF unless the mapping
+ * was just allocated or is unwritten, otherwise the callers would overwrite
+ * existing data with zeros. Hence we have to split the mapping into a range up
+ * to and including EOF, and a second mapping for beyond EOF.
+ */
+static void
+xfs_map_trim_size(
+       struct inode            *inode,
+       sector_t                iblock,
+       struct buffer_head      *bh_result,
+       struct xfs_bmbt_irec    *imap,
+       xfs_off_t               offset,
+       ssize_t                 size)
+{
+       xfs_off_t               mapping_size;
+
+       mapping_size = imap->br_startoff + imap->br_blockcount - iblock;
+       mapping_size <<= inode->i_blkbits;
+
+       ASSERT(mapping_size > 0);
+       if (mapping_size > size)
+               mapping_size = size;
+       if (offset < i_size_read(inode) &&
+           offset + mapping_size >= i_size_read(inode)) {
+               /* limit mapping to block that spans EOF */
+               mapping_size = roundup_64(i_size_read(inode) - offset,
+                                         1 << inode->i_blkbits);
+       }
+       if (mapping_size > LONG_MAX)
+               mapping_size = LONG_MAX;
+
+       bh_result->b_size = mapping_size;
+}
+
 STATIC int
 __xfs_get_blocks(
        struct inode            *inode,
@@ -1347,6 +1388,11 @@ __xfs_get_blocks(
                goto out_unlock;
        }
 
+       /* trim mapping down to size requested */
+       if (direct || size > (1 << inode->i_blkbits))
+               xfs_map_trim_size(inode, iblock, bh_result,
+                                 &imap, offset, size);
+
        /*
         * For unwritten extents do not report a disk address in the buffered
         * read case (treat as if we're reading into a hole).
@@ -1392,39 +1438,6 @@ __xfs_get_blocks(
                }
        }
 
-       /*
-        * If this is O_DIRECT or the mpage code calling tell them how large
-        * the mapping is, so that we can avoid repeated get_blocks calls.
-        *
-        * If the mapping spans EOF, then we have to break the mapping up as the
-        * mapping for blocks beyond EOF must be marked new so that sub block
-        * regions can be correctly zeroed. We can't do this for mappings within
-        * EOF unless the mapping was just allocated or is unwritten, otherwise
-        * the callers would overwrite existing data with zeros. Hence we have
-        * to split the mapping into a range up to and including EOF, and a
-        * second mapping for beyond EOF.
-        */
-       if (direct || size > (1 << inode->i_blkbits)) {
-               xfs_off_t               mapping_size;
-
-               mapping_size = imap.br_startoff + imap.br_blockcount - iblock;
-               mapping_size <<= inode->i_blkbits;
-
-               ASSERT(mapping_size > 0);
-               if (mapping_size > size)
-                       mapping_size = size;
-               if (offset < i_size_read(inode) &&
-                   offset + mapping_size >= i_size_read(inode)) {
-                       /* limit mapping to block that spans EOF */
-                       mapping_size = roundup_64(i_size_read(inode) - offset,
-                                                 1 << inode->i_blkbits);
-               }
-               if (mapping_size > LONG_MAX)
-                       mapping_size = LONG_MAX;
-
-               bh_result->b_size = mapping_size;
-       }
-
        return 0;
 
 out_unlock:
-- 
2.0.0

<Prev in Thread] Current Thread [Next in Thread>