-rw-r--r--	fs/btrfs/subpage.c	2
-rw-r--r--	fs/btrfs/subpage.h	9
2 files changed, 11 insertions, 0 deletions
diff --git a/fs/btrfs/subpage.c b/fs/btrfs/subpage.c
index 8ddd5fcbeb93..631d96f1e905 100644
--- a/fs/btrfs/subpage.c
+++ b/fs/btrfs/subpage.c
@@ -64,6 +64,7 @@
  * This means a slightly higher tree locking latency.
  */
 
+#if PAGE_SIZE > SZ_4K
 bool btrfs_is_subpage(const struct btrfs_fs_info *fs_info, struct address_space *mapping)
 {
 	if (fs_info->sectorsize >= PAGE_SIZE)
@@ -85,6 +86,7 @@ bool btrfs_is_subpage(const struct btrfs_fs_info *fs_info, struct address_space
 		return true;
 	return false;
 }
+#endif
 
 void btrfs_init_subpage_info(struct btrfs_subpage_info *subpage_info, u32 sectorsize)
 {
diff --git a/fs/btrfs/subpage.h b/fs/btrfs/subpage.h
index 249396e118d0..5532cc4fac50 100644
--- a/fs/btrfs/subpage.h
+++ b/fs/btrfs/subpage.h
@@ -5,6 +5,7 @@
 
 #include <linux/spinlock.h>
 #include <linux/atomic.h>
+#include <linux/sizes.h>
 
 struct address_space;
 struct folio;
@@ -88,7 +89,15 @@ enum btrfs_subpage_type {
 	BTRFS_SUBPAGE_DATA,
 };
 
+#if PAGE_SIZE > SZ_4K
 bool btrfs_is_subpage(const struct btrfs_fs_info *fs_info, struct address_space *mapping);
+#else
+static inline bool btrfs_is_subpage(const struct btrfs_fs_info *fs_info,
+				    struct address_space *mapping)
+{
+	return false;
+}
+#endif
 void btrfs_init_subpage_info(struct btrfs_subpage_info *subpage_info, u32 sectorsize);
 int btrfs_attach_subpage(const struct btrfs_fs_info *fs_info,
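
The pattern used here is a compile-time guard: on builds where PAGE_SIZE is 4K (the smallest sector size btrfs uses), btrfs_is_subpage() can never return true, so the header provides a static inline stub returning false and the out-of-line definition is compiled out, letting callers' subpage branches fold away. Below is a minimal, self-contained userspace sketch of the same guard pattern, not the kernel code itself; the names (MY_PAGE_SIZE, MY_SZ_4K, needs_subpage) are hypothetical stand-ins for PAGE_SIZE, SZ_4K, and btrfs_is_subpage().

	#include <stdbool.h>
	#include <stdio.h>

	/*
	 * Illustrative stand-ins for PAGE_SIZE and SZ_4K, which the kernel
	 * gets from <asm/page.h> and <linux/sizes.h>.
	 */
	#define MY_SZ_4K	4096
	#ifndef MY_PAGE_SIZE
	#define MY_PAGE_SIZE	4096	/* build with -DMY_PAGE_SIZE=65536 to flip the branch */
	#endif

	#if MY_PAGE_SIZE > MY_SZ_4K
	/* Real check: only meaningful when a block can be smaller than a page. */
	static bool needs_subpage(unsigned int sectorsize)
	{
		return sectorsize < MY_PAGE_SIZE;
	}
	#else
	/*
	 * Stub: with 4K pages the sector size can never be smaller than the
	 * page, so the answer is known at compile time and the compiler can
	 * drop the callers' subpage-only branches as dead code.
	 */
	static inline bool needs_subpage(unsigned int sectorsize)
	{
		(void)sectorsize;
		return false;
	}
	#endif

	int main(void)
	{
		/* Callers keep a single code path regardless of the page size. */
		printf("4K sectors need subpage handling: %s\n",
		       needs_subpage(4096) ? "yes" : "no");
		return 0;
	}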