
To: linux-xfs@xxxxxxxxxxx
Subject: [PATCH] cleanup pb_target handling
From: Christoph Hellwig <hch@xxxxxxxxxxxxx>
Date: Sun, 28 Apr 2002 16:20:24 +0100
Sender: owner-linux-xfs@xxxxxxxxxxx
User-agent: Mutt/1.2.5.1i

As a follow-up to the kmem_cache_alloc discussion I took a closer look at
pagebuf_lock_enable(), one of the two users.

The #ifdef case for kernels > 2.4.13 doesn't need the zeroing at all,
as it initializes every single field directly afterwards.  The other
version doesn't even have a chance of compiling, as it assumes
pbr_addrspace is embedded directly in the pb_target instead of being
a pointer.  With the field embedded directly, that code looks like it
actually compiles, and it needs some additional zeroing in pbr_addrspace.
But this zeroing - like the other initialization - is only needed the
first time the object is initialized, as all of it has to be back in
the initial state by the time the object is freed, making it ideal
for a slab constructor.
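
(For illustration, here is a minimal sketch of the 2.4-era slab
constructor contract this relies on - my_obj/my_ctor/my_cache/my_init
are made-up names, not code from the patch below:

	#include <linux/init.h>
	#include <linux/slab.h>
	#include <linux/list.h>
	#include <linux/spinlock.h>
	#include <linux/errno.h>

	struct my_obj {
		struct list_head	list;
		spinlock_t		lock;
	};

	/* called once per object when a new slab page is populated */
	static void my_ctor(void *p, kmem_cache_t *cachep, unsigned long flags)
	{
		struct my_obj *obj = p;

		/* only construct for real, skip the debug verify pass */
		if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
				SLAB_CTOR_CONSTRUCTOR) {
			INIT_LIST_HEAD(&obj->list);
			spin_lock_init(&obj->lock);
		}
	}

	static kmem_cache_t *my_cache;

	int __init my_init(void)
	{
		my_cache = kmem_cache_create("my_obj", sizeof(struct my_obj),
					     0, 0, my_ctor, NULL);
		return my_cache ? 0 : -ENOMEM;
	}

Every object coming out of kmem_cache_alloc() is then already in the
constructed state, as long as everyone frees objects back in that
state - exactly the invariant pb_target has.)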

Next problem in that area: pagebuf_lock_enable() directly bdput()s
the struct block_device whose inode is referenced through PB_ADDR_SPACE
for the whole lifetime of the pb_target - adding pbr_bdev and doing the
bdput() in pagebuf_lock_disable() fixes that.
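
(Roughly, with made-up helper names - the real versions are
pagebuf_lock_enable() and pagebuf_lock_disable() in the patch below,
shown here with pbr_mapping as in the post-2.4.13 variant:

	static int target_attach(pb_target_t *target, kdev_t kdev)
	{
		/* bdget() takes a reference we keep for target's lifetime */
		target->pbr_bdev = bdget(kdev);
		if (!target->pbr_bdev)
			return -ENOMEM;
		/* i_mapping is only valid while that reference is held */
		target->pbr_mapping = target->pbr_bdev->bd_inode->i_mapping;
		return 0;
	}

	static void target_detach(pb_target_t *target)
	{
		/* drop the reference only when the target itself goes away */
		bdput(target->pbr_bdev);
	}

The old code instead did the bdput() right after lookup, leaving the
mapping pointing into a block_device it no longer had a reference to.)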

While in that area I've also applied a number of other cleanups:

 - keep an address_space pointer in pb_target for post-2.4.13 kernels;
   we always need it, not the inode.
 - add PBT_ADDR_SPACE to get the address_space from a pb_target.  This
   fixes two places that couldn't compile for old kernels.
 - rename pbr_addrspace to pbr_mapping.
 - rename pagebuf_registration_cache to pagebuf_target_cache.
 - don't NULL-initialize pagebuf_target_cache, it's in .bss.
 - move pagebuf_locking_init to the bottom of the file, where
   pagebuf_locking_terminate lives as well and where Linux sources tend
   to have their init/exit code.
 - kill _pagebuf_registration_free; there was only one user, the
   name was misleading, and looking at the caller shows the NULL check
   was unneeded.
 - kill the misleading comment above pagebuf_lock_enable.
 - general cleanup of pagebuf_lock_enable.
 - fix a misleading comment about mapping->page_lock.
 - clean up pagebuf_locking_init; the calling chain doesn't allow
   the cache to be non-NULL without serious kernel bugs.


Index: linux/fs/xfs/pagebuf/page_buf.h
===================================================================
RCS file: /cvs/linux-2.4-xfs/linux/fs/xfs/pagebuf/page_buf.h,v
retrieving revision 1.10
diff -u -u -r1.10 page_buf.h
--- linux/fs/xfs/pagebuf/page_buf.h     2002/03/28 19:45:15     1.10
+++ linux/fs/xfs/pagebuf/page_buf.h     2002/04/28 14:59:34
@@ -179,29 +179,23 @@
 #define PBF_DONE(pb) (((pb)->pb_flags & (PBF_PARTIAL|PBF_NONE)) == 0)
 
 
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,13)
 
 typedef struct pb_target {
        kdev_t                  pbr_device;
        unsigned int            pbr_blocksize;
        unsigned int            pbr_blocksize_bits;
-       struct address_space    *pbr_addrspace;
-} pb_target_t;
-
-#define PB_ADDR_SPACE(pb)  (&(pb)->pb_target->pbr_addrspace)
-
+       struct block_device     *pbr_bdev;
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,4,13))
+       struct address_space    pbr_mapping;
+#define PBT_ADDR_SPACE(pbt)    (&(pbt)->pbr_mapping)
 #else
-
-typedef struct pb_target {
-       kdev_t                  pbr_device;
-       unsigned int            pbr_blocksize;
-       unsigned int            pbr_blocksize_bits;
-       struct inode            *pbr_inode;
+       struct address_space    *pbr_mapping;
+#define PBT_ADDR_SPACE(pbt)    ((pbt)->pbr_mapping)
+#endif
 } pb_target_t;
 
-#define PB_ADDR_SPACE(pb)  ((pb)->pb_target->pbr_inode->i_mapping)
+#define PB_ADDR_SPACE(pb) PBT_ADDR_SPACE((pb)->pb_target)
 
-#endif
 
 struct page_buf_s;
 
Index: linux/fs/xfs/pagebuf/page_buf_locking.c
===================================================================
RCS file: /cvs/linux-2.4-xfs/linux/fs/xfs/pagebuf/page_buf_locking.c,v
retrieving revision 1.9
diff -u -u -r1.9 page_buf_locking.c
--- linux/fs/xfs/pagebuf/page_buf_locking.c     2002/04/26 01:35:51     1.9
+++ linux/fs/xfs/pagebuf/page_buf_locking.c     2002/04/28 14:59:36
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2002 Silicon Graphics, Inc.  All Rights Reserved.
+ * Portions Copyright (c) 2002 Christoph Hellwig.  All Rights Reserved.
  * 
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as
@@ -59,37 +60,13 @@
 
 pb_hash_t      pbhash[NHASH];
 
-static kmem_cache_t *pagebuf_registration_cache = NULL;
+static kmem_cache_t *pagebuf_target_cache;
 
 /*
  *     Initialization and Termination
  */
 
 /*
- *     pagebuf_locking_init
- */
-
-int __init pagebuf_locking_init(void)
-{
-       int     i;
-
-       if (pagebuf_registration_cache == NULL) {
-               pagebuf_registration_cache = kmem_cache_create("page_buf_reg_t",
-                                               sizeof(pb_target_t),
-                                               0, 0, NULL, NULL);
-               if (pagebuf_registration_cache == NULL)
-                       return(-ENOMEM);
-       }
-
-       for (i = 0; i < NHASH; i++) {
-               spin_lock_init(&pbhash[i].pb_hash_lock);
-               INIT_LIST_HEAD(&pbhash[i].pb_hash);
-       }
-
-       return(0);
-}
-
-/*
  * Hash calculation
  */
 
@@ -116,21 +93,6 @@
  */
 
 /*
- *     _pagebuf_registration_free
- *
- *     Free a page_buf_registration_t object.  The caller must hold
- *     the pagebuf_registered_inodes_lock.
- */
-
-static void
-_pagebuf_registration_free(pb_target_t *target)
-{
-       if (target != NULL) {
-               kmem_cache_free(pagebuf_registration_cache, target);
-       }
-}
-
-/*
  *     _pagebuf_get_lockable_buffer
  *
  *     Looks up, and creates if absent, a lockable buffer for
@@ -363,23 +325,15 @@
                     pb_target_t *target)  /* inode for buffers         */
 {
        destroy_buffers(target->pbr_device);
-       truncate_inode_pages(target->pbr_inode->i_mapping, 0LL);
-       _pagebuf_registration_free(target);
+       truncate_inode_pages(PBT_ADDR_SPACE(target), 0LL);
+       bdput(target->pbr_bdev);
+       kmem_cache_free(pagebuf_target_cache, target);
 
        return(0);
 }
 
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,13)
-static struct address_space_operations pagebuf_aops = {
-       sync_page:      block_sync_page,
-};
-#endif
-
 /*
  *     pagebuf_lock_enable
- *
- *     pagebuf_lock_enable enables buffer object locking for an inode.
- *     This call fails with -EBUSY if buffers are in use for this inode.
  */
 
 pb_target_t *
@@ -388,37 +342,23 @@
        struct super_block *sb)
 {
        pb_target_t     *target;
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,13)
-       struct block_device *bdev = bdget(kdev);
 
-       if (!bdev)
-               return NULL;
+       target = kmem_cache_alloc(pagebuf_target_cache, SLAB_KERNEL);
+       if (target) {
+               target->pbr_bdev = bdget(kdev);
+               if (!target->pbr_bdev)
+                       goto fail;
+               target->pbr_device = kdev;
+               pagebuf_target_blocksize(target, PAGE_CACHE_SIZE);
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,4,13))
+               target->pbr_mapping = target->pbr_bdev->bd_inode->i_mapping;
 #endif
-
-       target = kmem_cache_zalloc(pagebuf_registration_cache,
-                                               SLAB_KERNEL);
-       if (target == NULL) {
-               return(NULL);
        }
-       target->pbr_device = kdev;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,13)
-       target->pbr_inode = bdev->bd_inode;
-       bdput(bdev);
-#else
-       INIT_LIST_HEAD(&target->pbr_addrspace.clean_pages);
-       INIT_LIST_HEAD(&target->pbr_addrspace.dirty_pages);
-       INIT_LIST_HEAD(&target->pbr_addrspace.locked_pages);
-       spin_lock_init(&target->pbr_addrspace.i_shared_lock);
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,4)
-       /* Also needed for AC kernels */
-       spin_lock_init(&target->pbr_addrspace.page_lock);
-#endif
-       target->pbr_addrspace.a_ops = &pagebuf_aops;
-#endif
-       pagebuf_target_blocksize(target, PAGE_CACHE_SIZE);
 
        return target;
+fail:
+       kmem_cache_free(pagebuf_target_cache, target);
+       return NULL;
 }
 
 void
@@ -435,7 +375,7 @@
        pb_target_t     *target)
 {
        destroy_buffers(target->pbr_device);
-       truncate_inode_pages(target->pbr_inode->i_mapping, 0LL);
+       truncate_inode_pages(PBT_ADDR_SPACE(target), 0LL);
        return 0;
 }
 
@@ -459,6 +399,65 @@
        PB_TRACE(pb, PB_TRACE_REC(unlock), 0);
 }
 
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,4,13))
+static struct address_space_operations pagebuf_aops = {
+       sync_page:      block_sync_page,
+};
+
+static void pb_target_ctor(void *p, kmem_cache_t * cachep, unsigned long flags)
+{
+       struct address_space *mapping = &(((pb_target_t *)p)->pbr_mapping);
+
+       if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
+                       SLAB_CTOR_CONSTRUCTOR) {
+
+               memset(mapping, 0, sizeof(*mapping));
+               INIT_LIST_HEAD(&mapping->clean_pages);
+               INIT_LIST_HEAD(&mapping->dirty_pages);
+               INIT_LIST_HEAD(&mapping->locked_pages);
+               mapping->a_ops = &pagebuf_aops;
+               spin_lock_init(&mapping->i_shared_lock);
+
+               /*
+                * David Miller's pagecache lock splitting patch adds an
+                * additional per-mapping spinlock.  Normally we do not
+                * care for out-of-tree patches, but this one is included
+                * in Red Hat's vendor kernels up to the heavily modified
+                * 2.4.9 series (7.1/7.2/AS).
+                *
+                * As there is no feature macro for the new lock, all we
+                * can do is to test against a macro added by this patch:
+                */
+#ifdef PAGECACHE_LOCK
+               spin_lock_init(&mapping->page_lock);
+#endif
+       }
+}
+#else
+#define pb_target_ctor NULL
+#endif /* LINUX_VERSION_CODE <= KERNEL_VERSION(2,4,13) */
+
+
+/*
+ *     pagebuf_locking_init
+ */
+
+int __init pagebuf_locking_init(void)
+{
+       int i;
+
+       pagebuf_target_cache = kmem_cache_create("pb_target",
+                       sizeof(pb_target_t), 0, 0, pb_target_ctor, NULL);
+       if (!pagebuf_target_cache)
+               return -ENOMEM;
+
+       for (i = 0; i < NHASH; i++) {
+               spin_lock_init(&pbhash[i].pb_hash_lock);
+               INIT_LIST_HEAD(&pbhash[i].pb_hash);
+       }
+
+       return 0;
+}
 
 /*
  *     pagebuf_terminate_locking.  Do not define as __exit, it is called from
@@ -467,6 +466,6 @@
 
 void pagebuf_locking_terminate(void)
 {
-       if (pagebuf_registration_cache != NULL)
-               kmem_cache_destroy(pagebuf_registration_cache);
+       if (pagebuf_target_cache)
+               kmem_cache_destroy(pagebuf_target_cache);
 }

