Commit e4f9ba20 authored by Ira Weiny, committed by Darrick J. Wong

fs/xfs: Update xfs_ioctl_setattr_dax_invalidate()

Because of the separation of FS_XFLAG_DAX from S_DAX and the delayed
setting of S_DAX, data invalidation no longer needs to happen when
FS_XFLAG_DAX is changed.

Change xfs_ioctl_setattr_dax_invalidate() to be
xfs_ioctl_setattr_prepare_dax() and alter the code to reflect the new
functionality.

Furthermore, we no longer need the locking, so we remove the join_flags
logic.

Signed-off-by: Ira Weiny <ira.weiny@intel.com>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
Parent b5df6284
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -1245,64 +1245,26 @@ xfs_ioctl_setattr_xflags(
         return 0;
 }
 
-/*
- * If we are changing DAX flags, we have to ensure the file is clean and any
- * cached objects in the address space are invalidated and removed. This
- * requires us to lock out other IO and page faults similar to a truncate
- * operation. The locks need to be held until the transaction has been committed
- * so that the cache invalidation is atomic with respect to the DAX flag
- * manipulation.
- */
-static int
-xfs_ioctl_setattr_dax_invalidate(
+static void
+xfs_ioctl_setattr_prepare_dax(
         struct xfs_inode        *ip,
-        struct fsxattr          *fa,
-        int                     *join_flags)
+        struct fsxattr          *fa)
 {
-        struct inode            *inode = VFS_I(ip);
-        struct super_block      *sb = inode->i_sb;
-        int                     error;
-
-        *join_flags = 0;
-
-        /*
-         * It is only valid to set the DAX flag on regular files and
-         * directories on filesystems where the block size is equal to the page
-         * size. On directories it serves as an inherited hint so we don't
-         * have to check the device for dax support or flush pagecache.
-         */
-        if (fa->fsx_xflags & FS_XFLAG_DAX) {
-                struct xfs_buftarg *target = xfs_inode_buftarg(ip);
-
-                if (!bdev_dax_supported(target->bt_bdev, sb->s_blocksize))
-                        return -EINVAL;
-        }
-
-        /* If the DAX state is not changing, we have nothing to do here. */
-        if ((fa->fsx_xflags & FS_XFLAG_DAX) && IS_DAX(inode))
-                return 0;
-        if (!(fa->fsx_xflags & FS_XFLAG_DAX) && !IS_DAX(inode))
-                return 0;
+        struct xfs_mount        *mp = ip->i_mount;
+        struct inode            *inode = VFS_I(ip);
 
         if (S_ISDIR(inode->i_mode))
-                return 0;
-
-        /* lock, flush and invalidate mapping in preparation for flag change */
-        xfs_ilock(ip, XFS_MMAPLOCK_EXCL | XFS_IOLOCK_EXCL);
-        error = filemap_write_and_wait(inode->i_mapping);
-        if (error)
-                goto out_unlock;
-        error = invalidate_inode_pages2(inode->i_mapping);
-        if (error)
-                goto out_unlock;
-
-        *join_flags = XFS_MMAPLOCK_EXCL | XFS_IOLOCK_EXCL;
-        return 0;
+                return;
 
-out_unlock:
-        xfs_iunlock(ip, XFS_MMAPLOCK_EXCL | XFS_IOLOCK_EXCL);
-        return error;
+        if ((mp->m_flags & XFS_MOUNT_DAX_ALWAYS) ||
+            (mp->m_flags & XFS_MOUNT_DAX_NEVER))
+                return;
 
+        if (((fa->fsx_xflags & FS_XFLAG_DAX) &&
+            !(ip->i_d.di_flags2 & XFS_DIFLAG2_DAX)) ||
+            (!(fa->fsx_xflags & FS_XFLAG_DAX) &&
+             (ip->i_d.di_flags2 & XFS_DIFLAG2_DAX)))
+                d_mark_dontcache(inode);
 }
 
 /*
@@ -1310,17 +1272,10 @@ xfs_ioctl_setattr_dax_invalidate(
  * have permission to do so. On success, return a clean transaction and the
  * inode locked exclusively ready for further operation specific checks. On
  * failure, return an error without modifying or locking the inode.
- *
- * The inode might already be IO locked on call. If this is the case, it is
- * indicated in @join_flags and we take full responsibility for ensuring they
- * are unlocked from now on. Hence if we have an error here, we still have to
- * unlock them. Otherwise, once they are joined to the transaction, they will
- * be unlocked on commit/cancel.
  */
 static struct xfs_trans *
 xfs_ioctl_setattr_get_trans(
-        struct xfs_inode        *ip,
-        int                     join_flags)
+        struct xfs_inode        *ip)
 {
         struct xfs_mount        *mp = ip->i_mount;
         struct xfs_trans        *tp;
@@ -1337,8 +1292,7 @@ xfs_ioctl_setattr_get_trans(
                 goto out_unlock;
 
         xfs_ilock(ip, XFS_ILOCK_EXCL);
-        xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL | join_flags);
-        join_flags = 0;
+        xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
 
         /*
          * CAP_FOWNER overrides the following restrictions:
@@ -1359,8 +1313,6 @@ xfs_ioctl_setattr_get_trans(
 out_cancel:
         xfs_trans_cancel(tp);
 out_unlock:
-        if (join_flags)
-                xfs_iunlock(ip, join_flags);
         return ERR_PTR(error);
 }
 
@@ -1486,7 +1438,6 @@ xfs_ioctl_setattr(
         struct xfs_dquot        *pdqp = NULL;
         struct xfs_dquot        *olddquot = NULL;
         int                     code;
-        int                     join_flags = 0;
 
         trace_xfs_ioctl_setattr(ip);
 
@@ -1510,18 +1461,9 @@ xfs_ioctl_setattr(
                 return code;
         }
 
-        /*
-         * Changing DAX config may require inode locking for mapping
-         * invalidation. These need to be held all the way to transaction commit
-         * or cancel time, so need to be passed through to
-         * xfs_ioctl_setattr_get_trans() so it can apply them to the join call
-         * appropriately.
-         */
-        code = xfs_ioctl_setattr_dax_invalidate(ip, fa, &join_flags);
-        if (code)
-                goto error_free_dquots;
+        xfs_ioctl_setattr_prepare_dax(ip, fa);
 
-        tp = xfs_ioctl_setattr_get_trans(ip, join_flags);
+        tp = xfs_ioctl_setattr_get_trans(ip);
         if (IS_ERR(tp)) {
                 code = PTR_ERR(tp);
                 goto error_free_dquots;
@@ -1651,7 +1593,6 @@ xfs_ioc_setxflags(
         struct fsxattr          fa;
         struct fsxattr          old_fa;
         unsigned int            flags;
-        int                     join_flags = 0;
         int                     error;
 
         if (copy_from_user(&flags, arg, sizeof(flags)))
@@ -1668,18 +1609,9 @@ xfs_ioc_setxflags(
         if (error)
                 return error;
 
-        /*
-         * Changing DAX config may require inode locking for mapping
-         * invalidation. These need to be held all the way to transaction commit
-         * or cancel time, so need to be passed through to
-         * xfs_ioctl_setattr_get_trans() so it can apply them to the join call
-         * appropriately.
-         */
-        error = xfs_ioctl_setattr_dax_invalidate(ip, &fa, &join_flags);
-        if (error)
-                goto out_drop_write;
+        xfs_ioctl_setattr_prepare_dax(ip, &fa);
 
-        tp = xfs_ioctl_setattr_get_trans(ip, join_flags);
+        tp = xfs_ioctl_setattr_get_trans(ip);
         if (IS_ERR(tp)) {
                 error = PTR_ERR(tp);
                 goto out_drop_write;
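For context, a minimal userspace sketch (hypothetical paths, not part of this patch) of how the path above is reached: the per-file DAX hint is toggled with the FS_IOC_FSSETXATTR ioctl, which XFS services through xfs_ioctl_setattr(). With this change, flipping FS_XFLAG_DAX no longer invalidates the mapping immediately; the effective S_DAX state is re-evaluated only when the inode is re-instantiated, which the d_mark_dontcache() call above arranges to happen sooner.

/*
 * Hypothetical userspace sketch: request the on-disk DAX hint on an XFS file.
 * struct fsxattr, FS_IOC_FSGETXATTR/FS_IOC_FSSETXATTR and FS_XFLAG_DAX come
 * from <linux/fs.h>; the file path is made up for illustration.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

int main(void)
{
        struct fsxattr fsx;
        int fd = open("/mnt/xfs/testfile", O_RDWR);     /* hypothetical path */

        if (fd < 0) {
                perror("open");
                return 1;
        }
        if (ioctl(fd, FS_IOC_FSGETXATTR, &fsx) < 0) {   /* read current xflags */
                perror("FS_IOC_FSGETXATTR");
                return 1;
        }
        fsx.fsx_xflags |= FS_XFLAG_DAX;                  /* set the per-file DAX hint */
        if (ioctl(fd, FS_IOC_FSSETXATTR, &fsx) < 0)      /* handled via xfs_ioctl_setattr() */
                perror("FS_IOC_FSSETXATTR");
        close(fd);
        return 0;
}

Whether DAX is ultimately enabled still depends on the mount options: the XFS_MOUNT_DAX_ALWAYS and XFS_MOUNT_DAX_NEVER checks in xfs_ioctl_setattr_prepare_dax() above cause the flag change to be ignored for cache purposes when a dax mount option overrides the per-file hint.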