Merge branch 'openzfs:master' into BSE-24233-control
rnickle authored Sep 26, 2024
2 parents 128bfb3 + ab1b87e commit e2f82b4
Showing 7 changed files with 144 additions and 6 deletions.
33 changes: 33 additions & 0 deletions config/kernel-vfs-invalidate_folio.m4
@@ -0,0 +1,33 @@
dnl #
dnl # Linux 5.18 uses invalidate_folio in lieu of invalidate_page
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_VFS_INVALIDATE_FOLIO], [
    ZFS_LINUX_TEST_SRC([vfs_has_invalidate_folio], [
        #include <linux/fs.h>
        static void
        test_invalidate_folio(struct folio *folio, size_t offset,
            size_t len) {
            (void) folio; (void) offset; (void) len;
            return;
        }
        static const struct address_space_operations
            aops __attribute__ ((unused)) = {
            .invalidate_folio = test_invalidate_folio,
        };
    ],[])
])

AC_DEFUN([ZFS_AC_KERNEL_VFS_INVALIDATE_FOLIO], [
    dnl #
    dnl # Linux 5.18 uses invalidate_folio in lieu of invalidate_page
    dnl #
    AC_MSG_CHECKING([whether invalidate_folio exists])
    ZFS_LINUX_TEST_RESULT([vfs_has_invalidate_folio], [
        AC_MSG_RESULT([yes])
        AC_DEFINE(HAVE_VFS_INVALIDATE_FOLIO, 1, [invalidate_folio exists])
    ],[
        AC_MSG_RESULT([no])
    ])
])
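For context, these are the two callback shapes the check above distinguishes, a minimal sketch assuming the stock Linux kernel headers (the pre-5.18 member is shown only for comparison; the macro probes just the new one):

    /* Pre-5.18, page-based member of struct address_space_operations: */
    void (*invalidatepage)(struct page *, unsigned int offset, unsigned int len);

    /* Linux 5.18+, folio-based replacement probed by this macro: */
    void (*invalidate_folio)(struct folio *, size_t offset, size_t len);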
32 changes: 32 additions & 0 deletions config/kernel-vfs-release_folio.m4
@@ -0,0 +1,32 @@
dnl #
dnl # Linux 5.19 uses release_folio in lieu of releasepage
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_VFS_RELEASE_FOLIO], [
    ZFS_LINUX_TEST_SRC([vfs_has_release_folio], [
        #include <linux/fs.h>
        static bool
        test_release_folio(struct folio *folio, gfp_t gfp) {
            (void) folio; (void) gfp;
            return (0);
        }
        static const struct address_space_operations
            aops __attribute__ ((unused)) = {
            .release_folio = test_release_folio,
        };
    ],[])
])

AC_DEFUN([ZFS_AC_KERNEL_VFS_RELEASE_FOLIO], [
    dnl #
    dnl # Linux 5.19 uses release_folio in lieu of releasepage
    dnl #
    AC_MSG_CHECKING([whether release_folio exists])
    ZFS_LINUX_TEST_RESULT([vfs_has_release_folio], [
        AC_MSG_RESULT([yes])
        AC_DEFINE(HAVE_VFS_RELEASE_FOLIO, 1, [release_folio exists])
    ],[
        AC_MSG_RESULT([no])
    ])
])
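Likewise for the release hook, a sketch under the same assumption; note the return type changes from int to bool across the rename:

    /* Pre-5.19, page-based; returns nonzero when private state was freed: */
    int (*releasepage)(struct page *, gfp_t);

    /* Linux 5.19+, folio-based replacement probed by this macro: */
    bool (*release_folio)(struct folio *, gfp_t);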
4 changes: 4 additions & 0 deletions config/kernel.m4
@@ -77,6 +77,8 @@ AC_DEFUN([ZFS_AC_KERNEL_TEST_SRC], [
    ZFS_AC_KERNEL_SRC_SGET
    ZFS_AC_KERNEL_SRC_VFS_FILEMAP_DIRTY_FOLIO
    ZFS_AC_KERNEL_SRC_VFS_READ_FOLIO
    ZFS_AC_KERNEL_SRC_VFS_RELEASE_FOLIO
    ZFS_AC_KERNEL_SRC_VFS_INVALIDATE_FOLIO
    ZFS_AC_KERNEL_SRC_VFS_FSYNC_2ARGS
    ZFS_AC_KERNEL_SRC_VFS_DIRECT_IO
    ZFS_AC_KERNEL_SRC_VFS_READPAGES
@@ -185,6 +187,8 @@ AC_DEFUN([ZFS_AC_KERNEL_TEST_RESULT], [
    ZFS_AC_KERNEL_SGET
    ZFS_AC_KERNEL_VFS_FILEMAP_DIRTY_FOLIO
    ZFS_AC_KERNEL_VFS_READ_FOLIO
    ZFS_AC_KERNEL_VFS_RELEASE_FOLIO
    ZFS_AC_KERNEL_VFS_INVALIDATE_FOLIO
    ZFS_AC_KERNEL_VFS_FSYNC_2ARGS
    ZFS_AC_KERNEL_VFS_DIRECT_IO
    ZFS_AC_KERNEL_VFS_READPAGES
17 changes: 17 additions & 0 deletions module/os/linux/zfs/zfs_vnops_os.c
@@ -260,6 +260,15 @@ update_pages(znode_t *zp, int64_t start, int len, objset_t *os)
        } else {
            ClearPageError(pp);
            SetPageUptodate(pp);
            if (!PagePrivate(pp)) {
                /*
                 * Set private bit so page migration
                 * will wait for us to finish writeback
                 * before calling migrate_folio().
                 */
                SetPagePrivate(pp);
                get_page(pp);
            }

            if (mapping_writably_mapped(mp))
                flush_dcache_page(pp);
@@ -4037,6 +4046,14 @@ zfs_fillpage(struct inode *ip, struct page *pp)
    } else {
        ClearPageError(pp);
        SetPageUptodate(pp);
        if (!PagePrivate(pp)) {
            /*
             * Set private bit so page migration will wait for us to
             * finish writeback before calling migrate_folio().
             */
            SetPagePrivate(pp);
            get_page(pp);
        }
    }

    return (error);
8 changes: 8 additions & 0 deletions module/os/linux/zfs/zfs_znode_os.c
@@ -1576,6 +1576,14 @@ zfs_zero_partial_page(znode_t *zp, uint64_t start, uint64_t len)
        mark_page_accessed(pp);
        SetPageUptodate(pp);
        ClearPageError(pp);
        if (!PagePrivate(pp)) {
            /*
             * Set private bit so page migration will wait for us to
             * finish writeback before calling migrate_folio().
             */
            SetPagePrivate(pp);
            get_page(pp);
        }
        unlock_page(pp);
        put_page(pp);
    }
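The three hunks above open-code the same hold pattern, and zpl_releasepage() in the zpl_file.c diff below undoes it. A condensed sketch of the pairing; the helper names are hypothetical, the commit itself adds no such helpers:

    /* Hypothetical helpers summarizing this commit's pattern. */
    static inline void
    zfs_page_hold_private(struct page *pp)
    {
        if (!PagePrivate(pp)) {
            /* Hold a reference so migrate_folio() waits for writeback. */
            SetPagePrivate(pp);
            get_page(pp);
        }
    }

    static inline void
    zfs_page_drop_private(struct page *pp)
    {
        if (PagePrivate(pp)) {
            /* Mirrors the body of zpl_releasepage() below. */
            ClearPagePrivate(pp);
            put_page(pp);
        }
    }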
46 changes: 46 additions & 0 deletions module/os/linux/zfs/zpl_file.c
@@ -607,6 +607,42 @@ zpl_writepage(struct page *pp, struct writeback_control *wbc)
    return (zpl_putpage(pp, wbc, &for_sync));
}

static int
zpl_releasepage(struct page *pp, gfp_t gfp)
{
    if (PagePrivate(pp)) {
        ClearPagePrivate(pp);
        put_page(pp);
    }
    return (1);
}

#ifdef HAVE_VFS_RELEASE_FOLIO
static bool
zpl_release_folio(struct folio *folio, gfp_t gfp)
{
    return (zpl_releasepage(&folio->page, gfp));
}
#endif

#ifdef HAVE_VFS_INVALIDATE_FOLIO
static void
zpl_invalidate_folio(struct folio *folio, size_t offset, size_t len)
{
    if ((offset == 0) && (len == PAGE_SIZE)) {
        zpl_releasepage(&folio->page, 0);
    }
}
#else
static void
zpl_invalidatepage(struct page *pp, unsigned int offset, unsigned int len)
{
    if ((offset == 0) && (len == PAGE_SIZE)) {
        zpl_releasepage(pp, 0);
    }
}
#endif

/*
 * The flag combination which matches the behavior of zfs_space() is
 * FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE. The FALLOC_FL_PUNCH_HOLE
@@ -1090,6 +1126,16 @@ const struct address_space_operations zpl_address_space_operations = {
#ifdef HAVE_VFS_FILEMAP_DIRTY_FOLIO
    .dirty_folio = filemap_dirty_folio,
#endif
#ifdef HAVE_VFS_RELEASE_FOLIO
    .release_folio = zpl_release_folio,
#else
    .releasepage = zpl_releasepage,
#endif
#ifdef HAVE_VFS_INVALIDATE_FOLIO
    .invalidate_folio = zpl_invalidate_folio,
#else
    .invalidatepage = zpl_invalidatepage,
#endif
};

const struct file_operations zpl_file_operations = {
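At build time the #ifdef ladder above resolves against the two configure checks; on a 5.19 or newer kernel, where both HAVE_VFS_RELEASE_FOLIO and HAVE_VFS_INVALIDATE_FOLIO are defined, the new entries reduce to the following sketch (unrelated members elided):

    const struct address_space_operations zpl_address_space_operations = {
        /* ...existing members unchanged... */
        .release_folio = zpl_release_folio,
        .invalidate_folio = zpl_invalidate_folio,
    };

On kernels predating the folio API the same table instead carries .releasepage and .invalidatepage, so a single source tree builds against either interface.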
@@ -28,9 +28,8 @@
 # 2. Create a pool.
 # 3. Backup the cachefile.
 # 4. Simulate the pool being torn down without export:
-# 4.1. Copy the underlying device state.
-# 4.2. Export the pool.
-# 4.3. Restore the device state from the copy.
+# 4.1. Sync then freeze the pool.
+# 4.2. Export the pool (uncleanly).
 # 5. Change the hostid.
 # 6. Verify that importing the pool from the cachefile fails.
 # 7. Verify that importing the pool from the cachefile with force
@@ -57,10 +56,9 @@ log_must zpool create -o cachefile=$CPATH $TESTPOOL1 $VDEV0
 log_must cp $CPATH $CPATHBKP
 
 # 4. Simulate the pool being torn down without export.
-log_must cp $VDEV0 $VDEV0.bak
+sync_pool $TESTPOOL1
+log_must zpool freeze $TESTPOOL1
 log_must zpool export $TESTPOOL1
-log_must cp -f $VDEV0.bak $VDEV0
-log_must rm -f $VDEV0.bak
 
 # 5. Change the hostid.
 log_must zgenhostid -f $HOSTID2
