Re: [PATCH v5 4/7] dax: add support for fsync/sync

To: Ross Zwisler <ross.zwisler@xxxxxxxxxxxxxxx>
Subject: Re: [PATCH v5 4/7] dax: add support for fsync/sync
From: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
Date: Tue, 22 Dec 2015 14:46:25 -0800
Cc: linux-kernel@xxxxxxxxxxxxxxx, "H. Peter Anvin" <hpa@xxxxxxxxx>, "J. Bruce Fields" <bfields@xxxxxxxxxxxx>, "Theodore Ts'o" <tytso@xxxxxxx>, Alexander Viro <viro@xxxxxxxxxxxxxxxxxx>, Andreas Dilger <adilger.kernel@xxxxxxxxx>, Dave Chinner <david@xxxxxxxxxxxxx>, Ingo Molnar <mingo@xxxxxxxxxx>, Jan Kara <jack@xxxxxxxx>, Jeff Layton <jlayton@xxxxxxxxxxxxxxx>, Matthew Wilcox <willy@xxxxxxxxxxxxxxx>, Thomas Gleixner <tglx@xxxxxxxxxxxxx>, linux-ext4@xxxxxxxxxxxxxxx, linux-fsdevel@xxxxxxxxxxxxxxx, linux-mm@xxxxxxxxx, linux-nvdimm@xxxxxxxxxxx, x86@xxxxxxxxxx, xfs@xxxxxxxxxxx, Dan Williams <dan.j.williams@xxxxxxxxx>, Matthew Wilcox <matthew.r.wilcox@xxxxxxxxx>, Dave Hansen <dave.hansen@xxxxxxxxxxxxxxx>
Delivered-to: xfs@xxxxxxxxxxx
In-reply-to: <1450502540-8744-5-git-send-email-ross.zwisler@xxxxxxxxxxxxxxx>
References: <1450502540-8744-1-git-send-email-ross.zwisler@xxxxxxxxxxxxxxx> <1450502540-8744-5-git-send-email-ross.zwisler@xxxxxxxxxxxxxxx>
On Fri, 18 Dec 2015 22:22:17 -0700 Ross Zwisler <ross.zwisler@xxxxxxxxxxxxxxx> wrote:

> To properly handle fsync/msync in an efficient way DAX needs to track dirty
> pages so it is able to flush them durably to media on demand.
> 
> The tracking of dirty pages is done via the radix tree in struct
> address_space.  This radix tree is already used by the page writeback
> infrastructure for tracking dirty pages associated with an open file, and
> it already has support for exceptional (non struct page*) entries.  We
> build upon these features to add exceptional entries to the radix tree for
> DAX dirty PMD or PTE pages at fault time.
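
For anyone following along: "exceptional" entries store their payload in
the radix tree slot word itself rather than pointing at a struct page,
with the low bits reserved as a type tag.  The RADIX_DAX_TYPE() and
RADIX_DAX_ADDR() accessors quoted below imply an encoding roughly like
this (a sketch only; the exact bit layout is my assumption, the real
macros are in the patch):

	/* low bits: RADIX_TREE_EXCEPTIONAL_ENTRY plus a PTE-vs-PMD flag */
	#define RADIX_DAX_PTE	(0x4 | RADIX_TREE_EXCEPTIONAL_ENTRY)
	#define RADIX_DAX_PMD	(0x8 | RADIX_TREE_EXCEPTIONAL_ENTRY)
	#define RADIX_DAX_MASK	0xf
	/* the type tag lives in the low bits of the slot word... */
	#define RADIX_DAX_TYPE(entry) ((unsigned long)(entry) & RADIX_DAX_MASK)
	/* ...and the (16-byte-aligned) pmem address in the remaining bits */
	#define RADIX_DAX_ADDR(entry) \
		((void __pmem *)((unsigned long)(entry) & ~RADIX_DAX_MASK))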

I'm getting a few rejects here against other pending changes.  Things
look OK to me, but please do runtime test the end result as it resides
in linux-next.  Which will be next year.

>
> ...
>
> +static void dax_writeback_one(struct address_space *mapping, pgoff_t index,
> +             void *entry)
> +{
> +     struct radix_tree_root *page_tree = &mapping->page_tree;
> +     int type = RADIX_DAX_TYPE(entry);
> +     struct radix_tree_node *node;
> +     void **slot;
> +
> +     if (type != RADIX_DAX_PTE && type != RADIX_DAX_PMD) {
> +             WARN_ON_ONCE(1);
> +             return;
> +     }

--- a/fs/dax.c~dax-add-support-for-fsync-sync-fix
+++ a/fs/dax.c
@@ -383,10 +383,8 @@ static void dax_writeback_one(struct add
        struct radix_tree_node *node;
        void **slot;
 
-       if (type != RADIX_DAX_PTE && type != RADIX_DAX_PMD) {
-               WARN_ON_ONCE(1);
+       if (WARN_ON_ONCE(type != RADIX_DAX_PTE && type != RADIX_DAX_PMD))
                return;
-       }
 
        spin_lock_irq(&mapping->tree_lock);
        /*

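(The folding works because WARN_ON_ONCE() evaluates to the truth value of
its condition, so the early return fires exactly when it did before.)
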
> +     spin_lock_irq(&mapping->tree_lock);
> +     /*
> +      * Regular page slots are stabilized by the page lock even
> +      * without the tree itself locked.  These unlocked entries
> +      * need verification under the tree lock.
> +      */
> +     if (!__radix_tree_lookup(page_tree, index, &node, &slot))
> +             goto unlock;
> +     if (*slot != entry)
> +             goto unlock;
> +
> +     /* another fsync thread may have already written back this entry */
> +     if (!radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE))
> +             goto unlock;
> +
> +     radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_TOWRITE);
> +
> +     if (type == RADIX_DAX_PMD)
> +             wb_cache_pmem(RADIX_DAX_ADDR(entry), PMD_SIZE);
> +     else
> +             wb_cache_pmem(RADIX_DAX_ADDR(entry), PAGE_SIZE);
> + unlock:
> +     spin_unlock_irq(&mapping->tree_lock);
> +}
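
Note the split between the per-entry wb_cache_pmem() here and the single
wmb_pmem() issued after the loop in dax_writeback_mapping_range() below:
as I understand the pmem API, wb_cache_pmem() only initiates writeback of
the dirty cachelines, and the final wmb_pmem() is the fence that makes
all of those flushes durable, so batching it amortizes the fence cost
across every entry written back.
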
> +
> +/*
> + * Flush the mapping to the persistent domain within the byte range of [start,
> + * end]. This is required by data integrity operations to ensure file data is
> + * on persistent storage prior to completion of the operation.
> + */
> +void dax_writeback_mapping_range(struct address_space *mapping, loff_t start,
> +             loff_t end)
> +{
> +     struct inode *inode = mapping->host;
> +     pgoff_t indices[PAGEVEC_SIZE];
> +     pgoff_t start_page, end_page;
> +     struct pagevec pvec;
> +     void *entry;
> +     int i;
> +
> +     if (inode->i_blkbits != PAGE_SHIFT) {
> +             WARN_ON_ONCE(1);
> +             return;
> +     }

again (same WARN_ON_ONCE() folding as above)

> +     rcu_read_lock();
> +     entry = radix_tree_lookup(&mapping->page_tree, start & PMD_MASK);
> +     rcu_read_unlock();

What stabilizes the memory at *entry after rcu_read_unlock()?
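
If the worry is that the entry is consulted after the read-side critical
section ends: since only the tag bits of the slot word are examined, one
option (an untested sketch, not from the patch) would be to do the PMD
check before dropping the RCU lock:

	rcu_read_lock();
	entry = radix_tree_lookup(&mapping->page_tree, start & PMD_MASK);
	/* see if the start of our range is covered by a PMD entry */
	if (entry && RADIX_DAX_TYPE(entry) == RADIX_DAX_PMD)
		start &= PMD_MASK;
	rcu_read_unlock();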

> +     /* see if the start of our range is covered by a PMD entry */
> +     if (entry && RADIX_DAX_TYPE(entry) == RADIX_DAX_PMD)
> +             start &= PMD_MASK;
> +
> +     start_page = start >> PAGE_CACHE_SHIFT;
> +     end_page = end >> PAGE_CACHE_SHIFT;
> +
> +     tag_pages_for_writeback(mapping, start_page, end_page);
> +
> +     pagevec_init(&pvec, 0);
> +     while (1) {
> +             pvec.nr = find_get_entries_tag(mapping, start_page,
> +                             PAGECACHE_TAG_TOWRITE, PAGEVEC_SIZE,
> +                             pvec.pages, indices);
> +
> +             if (pvec.nr == 0)
> +                     break;
> +
> +             for (i = 0; i < pvec.nr; i++)
> +                     dax_writeback_one(mapping, indices[i], pvec.pages[i]);
> +     }
> +     wmb_pmem();
> +}
> +EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
> +
>
> ...
>
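
For context, later patches in this series wire the flush into the
filesystems' fsync paths; a caller ends up looking roughly like this
(a hypothetical sketch: myfs_fsync() and its error handling are
illustrative only, not code from the series):

	static int myfs_fsync(struct file *file, loff_t start, loff_t end,
			int datasync)
	{
		struct inode *inode = file_inode(file);

		/* flush dirty DAX mappings to the persistent domain first */
		if (IS_DAX(inode))
			dax_writeback_mapping_range(inode->i_mapping,
					start, end);

		/* ...filesystem-specific metadata sync follows... */
		return 0;
	}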
