xfs
[Top] [All Lists]

[PATCH 05/14] xfs: make AIL tail pushing independent of the grant lock

To: xfs@xxxxxxxxxxx
Subject: [PATCH 05/14] xfs: make AIL tail pushing independent of the grant lock
From: Dave Chinner <david@xxxxxxxxxxxxx>
Date: Mon, 29 Nov 2010 12:38:23 +1100
In-reply-to: <1290994712-21376-1-git-send-email-david@xxxxxxxxxxxxx>
References: <1290994712-21376-1-git-send-email-david@xxxxxxxxxxxxx>
From: Dave Chinner <dchinner@xxxxxxxxxx>

The xlog_grant_push_ail() currently takes the grant lock internally to sample
the tail lsn, last sync lsn and the reserve grant head. Most of the callers
already hold the grant lock but have to drop it before calling
xlog_grant_push_ail(). This is a leftover from when the AIL tail pushing was
done inline and hence xlog_grant_push_ail had to drop the grant lock. AIL push
is now done in another thread and hence we can safely hold the grant lock over
the entire xlog_grant_push_ail call.

Push the grant lock outside of xlog_grant_push_ail() and pass the tail lsn and
last sync lsn values as parameters to make the internals completely oblivious
to the source of the values and independent of whatever locking or
synchronisation they require.  This will reduce traffic on the grant lock by
itself, but this is only one step in preparing for the complete removal of the
grant lock.

While there, clean up the indenting of xlog_grant_push_ail() to match the
rest of the XFS code.

Signed-off-by: Dave Chinner <dchinner@xxxxxxxxxx>
---
 fs/xfs/xfs_log.c |  123 ++++++++++++++++++++++++++++--------------------------
 1 files changed, 64 insertions(+), 59 deletions(-)

diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 93b5b2d..a35ef8f 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -71,7 +71,9 @@ STATIC void xlog_state_want_sync(xlog_t       *log, 
xlog_in_core_t *iclog);
 /* local functions to manipulate grant head */
 STATIC int  xlog_grant_log_space(struct log            *log,
                                 struct xlog_ticket     *xtic);
-STATIC void xlog_grant_push_ail(xfs_mount_t    *mp,
+STATIC void xlog_grant_push_ail(struct log     *log,
+                               xfs_lsn_t       tail_lsn,
+                               xfs_lsn_t       last_sync_lsn,
                                int             need_bytes);
 STATIC void xlog_regrant_reserve_log_space(xlog_t       *log,
                                           xlog_ticket_t *ticket);
@@ -356,7 +358,11 @@ xfs_log_reserve(
 
                trace_xfs_log_reserve(log, internal_ticket);
 
-               xlog_grant_push_ail(mp, internal_ticket->t_unit_res);
+               spin_lock(&log->l_grant_lock);
+               xlog_grant_push_ail(log, log->l_tail_lsn,
+                                   log->l_last_sync_lsn,
+                                   internal_ticket->t_unit_res);
+               spin_unlock(&log->l_grant_lock);
                retval = xlog_regrant_write_log_space(log, internal_ticket);
        } else {
                /* may sleep if need to allocate more tickets */
@@ -370,9 +376,12 @@ xfs_log_reserve(
 
                trace_xfs_log_reserve(log, internal_ticket);
 
-               xlog_grant_push_ail(mp,
+               spin_lock(&log->l_grant_lock);
+               xlog_grant_push_ail(log, log->l_tail_lsn,
+                                   log->l_last_sync_lsn,
                                    (internal_ticket->t_unit_res *
                                     internal_ticket->t_cnt));
+               spin_unlock(&log->l_grant_lock);
                retval = xlog_grant_log_space(log, internal_ticket);
        }
 
@@ -1226,60 +1235,58 @@ xlog_commit_record(
  * water mark.  In this manner, we would be creating a low water mark.
  */
 STATIC void
-xlog_grant_push_ail(xfs_mount_t        *mp,
-                   int         need_bytes)
-{
-    xlog_t     *log = mp->m_log;       /* pointer to the log */
-    xfs_lsn_t  tail_lsn;               /* lsn of the log tail */
-    xfs_lsn_t  threshold_lsn = 0;      /* lsn we'd like to be at */
-    int                free_blocks;            /* free blocks left to write to 
*/
-    int                free_bytes;             /* free bytes left to write to 
*/
-    int                threshold_block;        /* block in lsn we'd like to be 
at */
-    int                threshold_cycle;        /* lsn cycle we'd like to be at 
*/
-    int                free_threshold;
-
-    ASSERT(BTOBB(need_bytes) < log->l_logBBsize);
-
-    spin_lock(&log->l_grant_lock);
-    free_bytes = xlog_space_left(log->l_logsize, log->l_tail_lsn,
-                                               log->l_grant_reserve_lsn);
-    tail_lsn = log->l_tail_lsn;
-    free_blocks = BTOBBT(free_bytes);
-
-    /*
-     * Set the threshold for the minimum number of free blocks in the
-     * log to the maximum of what the caller needs, one quarter of the
-     * log, and 256 blocks.
-     */
-    free_threshold = BTOBB(need_bytes);
-    free_threshold = MAX(free_threshold, (log->l_logBBsize >> 2));
-    free_threshold = MAX(free_threshold, 256);
-    if (free_blocks < free_threshold) {
-       threshold_block = BLOCK_LSN(tail_lsn) + free_threshold;
-       threshold_cycle = CYCLE_LSN(tail_lsn);
-       if (threshold_block >= log->l_logBBsize) {
-           threshold_block -= log->l_logBBsize;
-           threshold_cycle += 1;
-       }
-       threshold_lsn = xlog_assign_lsn(threshold_cycle, threshold_block);
-
-       /* Don't pass in an lsn greater than the lsn of the last
-        * log record known to be on disk.
+xlog_grant_push_ail(
+       struct log      *log,
+       xfs_lsn_t       tail_lsn,
+       xfs_lsn_t       last_sync_lsn,
+       int             need_bytes)
+{
+       xfs_lsn_t       threshold_lsn = 0;
+       int             free_blocks;
+       int             free_bytes;
+       int             threshold_block;
+       int             threshold_cycle;
+       int             free_threshold;
+
+       ASSERT(BTOBB(need_bytes) < log->l_logBBsize);
+
+       free_bytes = xlog_space_left(log->l_logsize, tail_lsn,
+                                       log->l_grant_reserve_lsn);
+       free_blocks = BTOBBT(free_bytes);
+
+       /*
+        * Set the threshold for the minimum number of free blocks in the
+        * log to the maximum of what the caller needs, one quarter of the
+        * log, and 256 blocks.
         */
-       if (XFS_LSN_CMP(threshold_lsn, log->l_last_sync_lsn) > 0)
-           threshold_lsn = log->l_last_sync_lsn;
-    }
-    spin_unlock(&log->l_grant_lock);
-
-    /*
-     * Get the transaction layer to kick the dirty buffers out to
-     * disk asynchronously. No point in trying to do this if
-     * the filesystem is shutting down.
-     */
-    if (threshold_lsn &&
-       !XLOG_FORCED_SHUTDOWN(log))
-           xfs_trans_ail_push(log->l_ailp, threshold_lsn);
-}      /* xlog_grant_push_ail */
+       free_threshold = BTOBB(need_bytes);
+       free_threshold = MAX(free_threshold, (log->l_logBBsize >> 2));
+       free_threshold = MAX(free_threshold, 256);
+       if (free_blocks < free_threshold) {
+               threshold_block = BLOCK_LSN(tail_lsn) + free_threshold;
+               threshold_cycle = CYCLE_LSN(tail_lsn);
+               if (threshold_block >= log->l_logBBsize) {
+                       threshold_block -= log->l_logBBsize;
+                       threshold_cycle += 1;
+               }
+               threshold_lsn = xlog_assign_lsn(threshold_cycle,
+                                               threshold_block);
+               /*
+                * Don't pass in an lsn greater than the lsn of the last
+                * log record known to be on disk.
+                */
+               if (XFS_LSN_CMP(threshold_lsn, last_sync_lsn) > 0)
+                       threshold_lsn = last_sync_lsn;
+       }
+
+       /*
+        * Get the transaction layer to kick the dirty buffers out to
+        * disk asynchronously. No point in trying to do this if
+        * the filesystem is shutting down.
+        */
+       if (threshold_lsn && !XLOG_FORCED_SHUTDOWN(log))
+               xfs_trans_ail_push(log->l_ailp, threshold_lsn);
+}
 
 /*
  * The bdstrat callback function for log bufs. This gives us a central
@@ -2568,9 +2575,8 @@ redo:
                if (list_empty(&tic->t_queue))
                        list_add_tail(&tic->t_queue, &log->l_reserveq);
 
-               spin_unlock(&log->l_grant_lock);
-               xlog_grant_push_ail(log->l_mp, need_bytes);
-               spin_lock(&log->l_grant_lock);
+               xlog_grant_push_ail(log, log->l_tail_lsn,
+                                   log->l_last_sync_lsn, need_bytes);
 
                XFS_STATS_INC(xs_sleep_logspace);
                trace_xfs_log_grant_sleep(log, tic);
@@ -2676,9 +2682,8 @@ redo:
                                goto redo;
                }
 
-               spin_unlock(&log->l_grant_lock);
-               xlog_grant_push_ail(log->l_mp, need_bytes);
-               spin_lock(&log->l_grant_lock);
+               xlog_grant_push_ail(log, log->l_tail_lsn,
+                                   log->l_last_sync_lsn, need_bytes);
 
                XFS_STATS_INC(xs_sleep_logspace);
                trace_xfs_log_regrant_write_sleep(log, tic);
-- 
1.7.2.3

<Prev in Thread] Current Thread [Next in Thread>