If we use unhashed inodes, then the ->dirty_inode callback will
not tell us about pure page dirtying (I_DIRTY_PAGES) events. Hence
we will miss moving the inode to the superblock dirty list in
this case.
We therefore need to hook ->set_page_dirty to move the inode to the
dirty list so that it will be written back in a timely fashion.
Signed-off-by: Dave Chinner <david@xxxxxxxxxxxxx>
---
fs/xfs/linux-2.6/xfs_aops.c | 30 ++++++++++++++++++++++++++++++
1 files changed, 30 insertions(+), 0 deletions(-)
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index f42f80a..da6ea64 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -1568,11 +1568,41 @@ xfs_vm_invalidatepage(
block_invalidatepage(page, offset);
}
+/*
+ * The page has been marked dirty by the VM. Because we are using unhashed
+ * inodes now, we have to move the inode to the sb->s_dirty_list ourselves as
+ * we will not get a callback through ->dirty_inode() for I_DIRTY_PAGES.
+ *
+ * The use of the inode_lock is purely a temporary step to make this code work
+ * safely. Ultimately XFS will track all the dirty inode state internally in
+ * the radix trees, so this lock will disappear.
+ */
+STATIC int
+xfs_vm_set_page_dirty(
+ struct page *page)
+{
+ extern spinlock_t inode_lock;
+ struct inode *inode = page->mapping->host;
+
+ if (inode->i_state & I_DIRTY_PAGES)
+ return __set_page_dirty_buffers(page);
+
+ spin_lock(&inode_lock);
+ if (!(inode->i_state & I_DIRTY)) {
+ inode->dirtied_when = jiffies;
+ list_move(&inode->i_list, &inode->i_sb->s_dirty);
+ }
+ inode->i_state |= I_DIRTY_PAGES;
+ spin_unlock(&inode_lock);
+ return __set_page_dirty_buffers(page);
+}
+
const struct address_space_operations xfs_address_space_operations = {
.readpage = xfs_vm_readpage,
.readpages = xfs_vm_readpages,
.writepage = xfs_vm_writepage,
.writepages = xfs_vm_writepages,
+ .set_page_dirty = xfs_vm_set_page_dirty,
.sync_page = block_sync_page,
.releasepage = xfs_vm_releasepage,
.invalidatepage = xfs_vm_invalidatepage,
--
1.5.6
|