File: [Development] / linux-2.6-xfs / fs / xfs / linux-2.6 / xfs_fs_subr.c (download)
Revision 1.48, Thu Jan 11 14:43:27 2007 UTC (10 years, 9 months ago) by dgc.longdrop.melbourne.sgi.com
Branch: MAIN
Changes since 1.47: +8 -2
lines
Unmap pages before removing them from the page cache.
The new cancel_dirty_pages() code found that XFS was removing
pages from the page cache that had dirty page table entries.
XFS invalidates page cache pages via internal interfaces which
are implemented via truncate_inode_pages which does not remove
the page mapping first.
Switch to using invalidate_inode_pages2_range() which does almost
the same thing except it also removes page table mappings as
expected by cancel_dirty_pages.
Merge of xfs-linux-melb:xfs-kern:27909a by kenmcd.
Convert truncate_inode_pages to invalidate_inode_pages2_range
so that page table mappings are removed before we remove the
pages from the page cache.
|
/*
* Copyright (c) 2000-2002,2005-2006 Silicon Graphics, Inc.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "xfs.h"
/* Stub vnodeops entry point: a no-op that always reports success. */
int
fs_noerr(void)
{
	return 0;
}
/*
 * Stub vnodeops entry point for unimplemented operations.
 * Returns ENOSYS as a positive value — note this is not the
 * negative-errno style used elsewhere in the kernel.
 */
int
fs_nosys(void)
{
	return ENOSYS;
}
/* Stub vnodeops entry point with no return value: does nothing. */
void
fs_noval(void)
{
}
/* Convert a file byte offset to its page cache index. */
#define XFS_OFF_TO_PCINDEX(off) ((off) >> PAGE_CACHE_SHIFT)
/*
 * Toss cached pages covering the byte range [first, last] without
 * writing dirty data back.  invalidate_inode_pages2_range() also
 * removes page table mappings before dropping the pages, as required
 * by cancel_dirty_pages().  fiopt is accepted for interface
 * compatibility and is not examined here.
 */
void
fs_tosspages(
	bhv_desc_t	*bdp,
	xfs_off_t	first,
	xfs_off_t	last,
	int		fiopt)
{
	bhv_vnode_t	*vp = BHV_TO_VNODE(bdp);
	struct inode	*inode = vn_to_inode(vp);

	if (!VN_CACHED(vp))
		return;

	invalidate_inode_pages2_range(inode->i_mapping,
				      XFS_OFF_TO_PCINDEX(first),
				      XFS_OFF_TO_PCINDEX(last));
}
/*
 * Write back dirty cached data for [first, last], wait for it, then
 * invalidate the pages.  Any pending-truncate flag on the vnode is
 * cleared first.  invalidate_inode_pages2_range() tears down page
 * table mappings before removing pages from the page cache, as
 * expected by cancel_dirty_pages().  fiopt is unused.
 */
void
fs_flushinval_pages(
	bhv_desc_t	*bdp,
	xfs_off_t	first,
	xfs_off_t	last,
	int		fiopt)
{
	bhv_vnode_t	*vp = BHV_TO_VNODE(bdp);
	struct inode	*inode = vn_to_inode(vp);

	if (!VN_CACHED(vp))
		return;

	if (VN_TRUNC(vp))
		VUNTRUNCATE(vp);

	filemap_write_and_wait(inode->i_mapping);
	invalidate_inode_pages2_range(inode->i_mapping,
				      XFS_OFF_TO_PCINDEX(first),
				      XFS_OFF_TO_PCINDEX(last));
}
/*
 * Flush dirty pages for the vnode to disk.  With XFS_B_ASYNC set in
 * flags, writeback is started but not waited for; otherwise we block
 * until the data has been written.  Note that the filemap_* calls
 * operate on the whole mapping — first, last, and fiopt are not
 * consulted by this implementation.  Always returns 0.
 */
int
fs_flush_pages(
	bhv_desc_t	*bdp,
	xfs_off_t	first,
	xfs_off_t	last,
	uint64_t	flags,
	int		fiopt)
{
	bhv_vnode_t	*vp = BHV_TO_VNODE(bdp);
	struct inode	*inode = vn_to_inode(vp);

	if (!VN_DIRTY(vp))
		return 0;

	if (VN_TRUNC(vp))
		VUNTRUNCATE(vp);

	filemap_fdatawrite(inode->i_mapping);
	if (!(flags & XFS_B_ASYNC))
		filemap_fdatawait(inode->i_mapping);

	return 0;
}