From: Dave Chinner <dchinner@xxxxxxxxxx>
To avoid concerns that a single list and lock tracking the unaligned IOs
will not scale, create multiple lists, each with its own lock, and choose
the one to use by hashing the number of the unaligned block being zeroed.
Signed-off-by: Dave Chinner <dchinner@xxxxxxxxxx>
---
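[Note below the fold, so git am drops it: a minimal standalone userspace
sketch of the lock-striping pattern this patch applies. The names here
(stripe, track_block) and the pthread locking are illustrative assumptions
for the sketch, not kernel code from the patch itself.]

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define NR_STRIPES 37	/* prime: strided keys still hit every stripe */

struct node {
	uint64_t block;
	struct node *next;
};

struct stripe {
	pthread_mutex_t lock;	/* protects 'head' and the list behind it */
	struct node *head;
};

static struct stripe stripes[NR_STRIPES];

/* hash a block number to its stripe, as the patch's macros do */
static struct stripe *to_stripe(uint64_t block)
{
	return &stripes[block % NR_STRIPES];
}

static void track_block(uint64_t block)
{
	struct stripe *s = to_stripe(block);
	struct node *n = malloc(sizeof(*n));

	if (!n)
		return;
	n->block = block;

	pthread_mutex_lock(&s->lock);	/* only this stripe is serialised */
	n->next = s->head;
	s->head = n;
	pthread_mutex_unlock(&s->lock);
}

int main(void)
{
	int i;

	for (i = 0; i < NR_STRIPES; i++) {
		pthread_mutex_init(&stripes[i].lock, NULL);
		stripes[i].head = NULL;
	}

	/* eight blocks at a power-of-two stride (4096) land on eight
	 * different stripes, because gcd(4096 % 37, 37) == 1 */
	for (i = 0; i < 8; i++) {
		uint64_t block = (uint64_t)i * 4096;

		track_block(block);
		printf("block %8llu -> stripe %llu\n",
		       (unsigned long long)block,
		       (unsigned long long)(block % NR_STRIPES));
	}
	return 0;
}

Because the stripe count is prime, block numbers advancing at any stride
that is not a multiple of 37 (including the power-of-two strides common
for filesystem blocks) cycle through every stripe rather than aliasing
onto a few of them.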
fs/direct-io.c | 53 ++++++++++++++++++++++++++++++++++++++++-------------
1 file changed, 40 insertions(+), 13 deletions(-)
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 611524e..95dcba4 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -152,8 +152,29 @@ struct dio_zero_block {
atomic_t ref; /* reference count */
};
-DEFINE_SPINLOCK(dio_zero_block_lock);
-LIST_HEAD(dio_zero_block_list);
+#define DIO_ZERO_BLOCK_NR 37LL	/* prime, so strided blocks spread over all lists */
+struct dio_zero_block_head {
+ struct list_head list;
+ spinlock_t lock;
+};
+
+static struct dio_zero_block_head dio_zero_blocks[DIO_ZERO_BLOCK_NR];
+#define to_dio_zero_list(zb) (&dio_zero_blocks[(zb) % DIO_ZERO_BLOCK_NR].list)
+#define to_dio_zero_lock(zb) (&dio_zero_blocks[(zb) % DIO_ZERO_BLOCK_NR].lock)
+
+
+static int __init
+dio_init_zero_block(void)
+{
+ int i;
+
+ for (i = 0; i < DIO_ZERO_BLOCK_NR; i++) {
+ spin_lock_init(&dio_zero_blocks[i].lock);
+ INIT_LIST_HEAD(&dio_zero_blocks[i].list);
+ }
+ return 0;
+}
+subsys_initcall(dio_init_zero_block);
/*
* Add a filesystem block to the list of blocks we are tracking.
@@ -162,6 +183,8 @@ static void
dio_start_zero_block(struct dio *dio, sector_t zero_block)
{
struct dio_zero_block *zb;
+ struct list_head *list = to_dio_zero_list(zero_block);
+ spinlock_t *lock = to_dio_zero_lock(zero_block);
zb = kmalloc(sizeof(*zb), GFP_NOIO);
if (!zb)
@@ -172,9 +195,9 @@ dio_start_zero_block(struct dio *dio, sector_t zero_block)
zb->dio = dio;
atomic_set(&zb->ref, 1);
- spin_lock(&dio_zero_block_lock);
- list_add(&zb->dio_list, &dio_zero_block_list);
- spin_unlock(&dio_zero_block_lock);
+ spin_lock(lock);
+ list_add(&zb->dio_list, list);
+ spin_unlock(lock);
}
static void
@@ -194,20 +217,22 @@ static int
dio_wait_zero_block(struct dio *dio, sector_t zero_block)
{
struct dio_zero_block *zb;
+ struct list_head *list = to_dio_zero_list(zero_block);
+ spinlock_t *lock = to_dio_zero_lock(zero_block);
- spin_lock(&dio_zero_block_lock);
- list_for_each_entry(zb, &dio_zero_block_list, dio_list) {
+ spin_lock(lock);
+ list_for_each_entry(zb, list, dio_list) {
if (zb->dio->inode != dio->inode)
continue;
if (zb->zero_block != zero_block)
continue;
atomic_inc(&zb->ref);
- spin_unlock(&dio_zero_block_lock);
+ spin_unlock(lock);
wait_event(zb->wq, (list_empty(&zb->dio_list)));
dio_drop_zero_block(zb);
return 1;
}
- spin_unlock(&dio_zero_block_lock);
+ spin_unlock(lock);
return 0;
}
@@ -217,20 +242,22 @@ dio_wait_zero_block(struct dio *dio, sector_t zero_block)
static void dio_end_zero_block(struct dio *dio, sector_t zero_block)
{
struct dio_zero_block *zb;
+ struct list_head *list = to_dio_zero_list(zero_block);
+ spinlock_t *lock = to_dio_zero_lock(zero_block);
- spin_lock(&dio_zero_block_lock);
- list_for_each_entry(zb, &dio_zero_block_list, dio_list) {
+ spin_lock(lock);
+ list_for_each_entry(zb, list, dio_list) {
if (zb->dio->inode != dio->inode)
continue;
if (zb->zero_block != zero_block)
continue;
list_del_init(&zb->dio_list);
- spin_unlock(&dio_zero_block_lock);
+ spin_unlock(lock);
wake_up(&zb->wq);
dio_drop_zero_block(zb);
return;
}
- spin_unlock(&dio_zero_block_lock);
+ spin_unlock(lock);
}
/*
--
1.7.1