To: Andi Kleen <ak@xxxxxxx>
Subject: Re: performance degraded 2.4.8 -> 2.4.9?
From: Steve Lord <lord@xxxxxxx>
Date: 14 Nov 2001 10:17:55 -0600
Cc: linux-xfs@xxxxxxxxxxx
In-reply-to: <20011114165901.A28687@xxxxxxxxxxxxx>
References: <3BF1A6F2.273B49C4@xxxxxxxx> <1005696924.21227.4.camel@UberGeek> <3BF27ACB.B744479A@xxxxxxxx> <1005752233.25611.2.camel@xxxxxxxxxxxxxxxxxxxx> <20011114165901.A28687@xxxxxxxxxxxxx>
Sender: owner-linux-xfs@xxxxxxxxxxx
On Wed, 2001-11-14 at 09:59, Andi Kleen wrote:
> > It's a myth! We do not have changes all over the kernel, and we
> > definitely do not have any changes in the block layer. The last
> 
> Hmm, last time I checked you had some support for delayed buffers there...
> Not that I think that particular change is very intrusive, but I had some
> problems with it in the past when porting it to a custom kernel.

Hmm, yes, I guess I was not thinking of this as the block layer; it is
not really in paths that a dd to a raw device would hit. For the record:

*** /src/lord/linux/fs/buffer.c Wed Nov 14 10:03:08 2001
--- fs/buffer.c Wed Nov 14 09:22:42 2001
***************
*** 119,124 ****
--- 119,181 ----
  int bdflush_min[N_PARAM] = {  0,  10,    5,   25,  0,   1*HZ,   0, 0, 0};
  int bdflush_max[N_PARAM] = {100,50000, 20000, 20000,10000*HZ, 6000*HZ, 100, 0, 0};
  
+ #define buffer_delay_busy(bh) \
+       (buffer_delay(bh) && bh->b_page && PageLocked(bh->b_page))
+ 
+ int
+ _write_buffer(struct buffer_head *bh, int wait)
+ {
+       struct page *page = bh->b_page;
+       int ret = 0;
+ 
+       if (!page) 
+               BUG();
+       if (wait) {
+               lock_page(page);
+       } else if (TryLockPage(page)) {
+               if (current->need_resched)
+                       schedule();
+               return 0;
+       }
+ 
+       if (buffer_delay(bh)) {
+               ret = page->mapping->a_ops->writepage(page);
+       } else {
+               UnlockPage(page);
+       }
+       return ret;
+ }
+ 
+ static inline int
+ write_buffer(struct buffer_head *bh, int wait)
+ {
+       if (!buffer_delay(bh)) {
+               ll_rw_block(WRITE, 1, &bh);
+               return 1;
+       } else
+               return _write_buffer(bh, wait);
+ }
+ 
+ static inline int
+ write_buffer_locked(struct buffer_head *bh, int wait)
+ {
+       int     ret;
+ 
+       if (!buffer_delay(bh)) {
+               submit_bh(WRITE, bh);
+               return 1;
+       } else {
+               clear_bit(BH_Lock, &bh->b_state);
+               smp_mb__after_clear_bit();
+               ret = _write_buffer(bh, wait);
+               if ((ret == 0) && waitqueue_active(&bh->b_wait))
+                       wake_up(&bh->b_wait);
+               return ret;
+       }
+ }
+ 
+ 
+ 
  void unlock_buffer(struct buffer_head *bh)
  {
        clear_bit(BH_Wait_IO, &bh->b_state);
***************
*** 204,209 ****
--- 261,274 ----
  
                if (dev && bh->b_dev != dev)
                        continue;
+               if (test_bit(BH_Delay, &bh->b_state)) {
+                       spin_unlock(&lru_list_lock);
+                       if (count)
+                               write_locked_buffers(array, count);
+                       _write_buffer(bh, 0);
+                       return -EAGAIN;
+               }
+ 
                if (test_and_set_bit(BH_Lock, &bh->b_state))
                        continue;
                if (atomic_set_buffer_clean(bh)) {
***************
*** 830,836 ****
                        if (buffer_dirty(bh)) {
                                get_bh(bh);
                                spin_unlock(&lru_list_lock);
!                               ll_rw_block(WRITE, 1, &bh);
                                brelse(bh);
                                spin_lock(&lru_list_lock);
                        }
--- 895,901 ----
                        if (buffer_dirty(bh)) {
                                get_bh(bh);
                                spin_unlock(&lru_list_lock);
!                               write_buffer(bh, 1);
                                brelse(bh);
                                spin_lock(&lru_list_lock);
                        }
***************
*** 1352,1357 ****
--- 1417,1423 ----
                clear_bit(BH_Mapped, &bh->b_state);
                clear_bit(BH_Req, &bh->b_state);
                clear_bit(BH_New, &bh->b_state);
+               clear_bit(BH_Delay, &bh->b_state);
                unlock_buffer(bh);
        }
  }
***************
*** 2407,2413 ****
                get_bh(bh);
                set_bit(BH_launder, &bh->b_state);
                bh->b_end_io = end_buffer_io_sync;
!               submit_bh(WRITE, bh);
                tryagain = 0;
        } while ((bh = bh->b_this_page) != head);
  
--- 2473,2479 ----
                get_bh(bh);
                set_bit(BH_launder, &bh->b_state);
                bh->b_end_io = end_buffer_io_sync;
!               write_buffer_locked(bh, 0);
                tryagain = 0;
        } while ((bh = bh->b_this_page) != head);
  

Which, when you analyze what it does when buffers are not marked
BH_Delay, is actually nothing.
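
To make that concrete, here is a quick user-space model of the
write_buffer() dispatch above, with buffer_delay(), ll_rw_block() and
_write_buffer() stubbed out. This is only an illustration of the
control flow, not kernel code: with BH_Delay clear, everything falls
straight through to ll_rw_block(), exactly as before the patch.

#include <stdio.h>

#define BH_Delay        0x01    /* stand-in for the real buffer state bit */

struct buffer_head {
        unsigned long b_state;
};

static int buffer_delay(struct buffer_head *bh)
{
        return bh->b_state & BH_Delay;
}

/* Stubs standing in for the real kernel functions. */
static void ll_rw_block(struct buffer_head *bh)
{
        printf("ll_rw_block: unchanged 2.4 write path\n");
}

static int _write_buffer(struct buffer_head *bh, int wait)
{
        printf("_write_buffer: delalloc path, only for BH_Delay buffers\n");
        return 0;
}

/* Same dispatch as write_buffer() in the patch. */
static int write_buffer(struct buffer_head *bh, int wait)
{
        if (!buffer_delay(bh)) {
                ll_rw_block(bh);        /* old behaviour, untouched */
                return 1;
        }
        return _write_buffer(bh, wait); /* new code, XFS buffers only */
}

int main(void)
{
        struct buffer_head plain = { 0 };
        struct buffer_head delalloc = { BH_Delay };

        write_buffer(&plain, 1);        /* takes the unchanged path */
        write_buffer(&delalloc, 1);     /* only delalloc buffers get here */
        return 0;
}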

Steve

-- 

Steve Lord                                      voice: +1-651-683-3511
Principal Engineer, Filesystem Software         email: lord@xxxxxxx

