Diffstat (limited to 'fs/f2fs/super.c')
-rw-r--r--   fs/f2fs/super.c   210
1 file changed, 108 insertions, 102 deletions
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index db7afb806411..c4c225e09dc4 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -352,7 +352,7 @@ static match_table_t f2fs_checkpoint_tokens = {
 
 struct f2fs_fs_context {
 	struct f2fs_mount_info info;
-	unsigned int opt_mask;		/* Bits changed */
+	unsigned long long opt_mask;	/* Bits changed */
 	unsigned int spec_mask;
 	unsigned short qname_mask;
 };
@@ -360,23 +360,23 @@ struct f2fs_fs_context {
 #define F2FS_CTX_INFO(ctx) ((ctx)->info)
 
 static inline void ctx_set_opt(struct f2fs_fs_context *ctx,
-				unsigned int flag)
+				enum f2fs_mount_opt flag)
 {
-	ctx->info.opt |= flag;
-	ctx->opt_mask |= flag;
+	ctx->info.opt |= BIT(flag);
+	ctx->opt_mask |= BIT(flag);
 }
 
 static inline void ctx_clear_opt(struct f2fs_fs_context *ctx,
-				unsigned int flag)
+				enum f2fs_mount_opt flag)
 {
-	ctx->info.opt &= ~flag;
-	ctx->opt_mask |= flag;
+	ctx->info.opt &= ~BIT(flag);
+	ctx->opt_mask |= BIT(flag);
 }
 
 static inline bool ctx_test_opt(struct f2fs_fs_context *ctx,
-				unsigned int flag)
+				enum f2fs_mount_opt flag)
 {
-	return ctx->info.opt & flag;
+	return ctx->info.opt & BIT(flag);
 }
 
 void f2fs_printk(struct f2fs_sb_info *sbi, bool limit_rate,
@@ -1371,7 +1371,7 @@ static int f2fs_check_compression(struct fs_context *fc,
 		    ctx_test_opt(ctx, F2FS_MOUNT_COMPRESS_CACHE))
 			f2fs_info(sbi, "Image doesn't support compression");
 		clear_compression_spec(ctx);
-		ctx->opt_mask &= ~F2FS_MOUNT_COMPRESS_CACHE;
+		ctx->opt_mask &= ~BIT(F2FS_MOUNT_COMPRESS_CACHE);
 		return 0;
 	}
 	if (ctx->spec_mask & F2FS_SPEC_compress_extension) {
@@ -1439,42 +1439,42 @@ static int f2fs_check_opt_consistency(struct fs_context *fc,
 		return -EINVAL;
 
 	if (f2fs_hw_should_discard(sbi) &&
-			(ctx->opt_mask & F2FS_MOUNT_DISCARD) &&
+			(ctx->opt_mask & BIT(F2FS_MOUNT_DISCARD)) &&
 			!ctx_test_opt(ctx, F2FS_MOUNT_DISCARD)) {
 		f2fs_warn(sbi, "discard is required for zoned block devices");
 		return -EINVAL;
 	}
 
 	if (!f2fs_hw_support_discard(sbi) &&
-			(ctx->opt_mask & F2FS_MOUNT_DISCARD) &&
+			(ctx->opt_mask & BIT(F2FS_MOUNT_DISCARD)) &&
 			ctx_test_opt(ctx, F2FS_MOUNT_DISCARD)) {
 		f2fs_warn(sbi, "device does not support discard");
 		ctx_clear_opt(ctx, F2FS_MOUNT_DISCARD);
-		ctx->opt_mask &= ~F2FS_MOUNT_DISCARD;
+		ctx->opt_mask &= ~BIT(F2FS_MOUNT_DISCARD);
 	}
 
 	if (f2fs_sb_has_device_alias(sbi) &&
-			(ctx->opt_mask & F2FS_MOUNT_READ_EXTENT_CACHE) &&
+			(ctx->opt_mask & BIT(F2FS_MOUNT_READ_EXTENT_CACHE)) &&
 			!ctx_test_opt(ctx, F2FS_MOUNT_READ_EXTENT_CACHE)) {
 		f2fs_err(sbi, "device aliasing requires extent cache");
 		return -EINVAL;
 	}
 
 	if (test_opt(sbi, RESERVE_ROOT) &&
-			(ctx->opt_mask & F2FS_MOUNT_RESERVE_ROOT) &&
+			(ctx->opt_mask & BIT(F2FS_MOUNT_RESERVE_ROOT)) &&
 			ctx_test_opt(ctx, F2FS_MOUNT_RESERVE_ROOT)) {
 		f2fs_info(sbi, "Preserve previous reserve_root=%u",
 			F2FS_OPTION(sbi).root_reserved_blocks);
 		ctx_clear_opt(ctx, F2FS_MOUNT_RESERVE_ROOT);
-		ctx->opt_mask &= ~F2FS_MOUNT_RESERVE_ROOT;
+		ctx->opt_mask &= ~BIT(F2FS_MOUNT_RESERVE_ROOT);
 	}
 
 	if (test_opt(sbi, RESERVE_NODE) &&
-			(ctx->opt_mask & F2FS_MOUNT_RESERVE_NODE) &&
+			(ctx->opt_mask & BIT(F2FS_MOUNT_RESERVE_NODE)) &&
 			ctx_test_opt(ctx, F2FS_MOUNT_RESERVE_NODE)) {
 		f2fs_info(sbi, "Preserve previous reserve_node=%u",
 			F2FS_OPTION(sbi).root_reserved_nodes);
 		ctx_clear_opt(ctx, F2FS_MOUNT_RESERVE_NODE);
-		ctx->opt_mask &= ~F2FS_MOUNT_RESERVE_NODE;
+		ctx->opt_mask &= ~BIT(F2FS_MOUNT_RESERVE_NODE);
 	}
 
 	err = f2fs_check_test_dummy_encryption(fc, sb);
@@ -1759,6 +1759,7 @@ static struct inode *f2fs_alloc_inode(struct super_block *sb)
 	atomic_set(&fi->dirty_pages, 0);
 	atomic_set(&fi->i_compr_blocks, 0);
 	atomic_set(&fi->open_count, 0);
+	atomic_set(&fi->writeback, 0);
 	init_f2fs_rwsem(&fi->i_sem);
 	spin_lock_init(&fi->i_size_lock);
 	INIT_LIST_HEAD(&fi->dirty_list);
@@ -1798,7 +1799,7 @@ static int f2fs_drop_inode(struct inode *inode)
 	 *  - f2fs_gc -> iput -> evict
 	 *  - inode_wait_for_writeback(inode)
 	 */
-	if ((!inode_unhashed(inode) && inode->i_state & I_SYNC)) {
+	if ((!inode_unhashed(inode) && inode_state_read(inode) & I_SYNC)) {
 		if (!inode->i_nlink && !is_bad_inode(inode)) {
 			/* to avoid evict_inode call simultaneously */
 			__iget(inode);
@@ -1988,14 +1989,6 @@ static void f2fs_put_super(struct super_block *sb)
 		truncate_inode_pages_final(META_MAPPING(sbi));
 	}
 
-	for (i = 0; i < NR_COUNT_TYPE; i++) {
-		if (!get_pages(sbi, i))
-			continue;
-		f2fs_err(sbi, "detect filesystem reference count leak during "
-			"umount, type: %d, count: %lld", i, get_pages(sbi, i));
-		f2fs_bug_on(sbi, 1);
-	}
-
 	f2fs_bug_on(sbi, sbi->fsync_node_num);
 
 	f2fs_destroy_compress_inode(sbi);
@@ -2006,6 +1999,15 @@ static void f2fs_put_super(struct super_block *sb)
 	iput(sbi->meta_inode);
 	sbi->meta_inode = NULL;
 
+	/* Should check the page counts after dropping all node/meta pages */
+	for (i = 0; i < NR_COUNT_TYPE; i++) {
+		if (!get_pages(sbi, i))
+			continue;
+		f2fs_err(sbi, "detect filesystem reference count leak during "
+			"umount, type: %d, count: %lld", i, get_pages(sbi, i));
+		f2fs_bug_on(sbi, 1);
+	}
+
 	/*
 	 * iput() can update stat information, if f2fs_write_checkpoint()
 	 * above failed with error.
@@ -2026,7 +2028,6 @@ static void f2fs_put_super(struct super_block *sb)
 	kfree(sbi->raw_super);
 
 	f2fs_destroy_page_array_cache(sbi);
-	f2fs_destroy_xattr_caches(sbi);
 #ifdef CONFIG_QUOTA
 	for (i = 0; i < MAXQUOTAS; i++)
 		kfree(F2FS_OPTION(sbi).s_qf_names[i]);
@@ -2632,12 +2633,14 @@ restore_flag:
 	return err;
 }
 
-static void f2fs_enable_checkpoint(struct f2fs_sb_info *sbi)
+static int f2fs_enable_checkpoint(struct f2fs_sb_info *sbi)
 {
 	unsigned int nr_pages = get_pages(sbi, F2FS_DIRTY_DATA) / 16;
-	long long start, writeback, end;
+	long long start, writeback, lock, sync_inode, end;
+	int ret;
 
-	f2fs_info(sbi, "f2fs_enable_checkpoint() starts, meta: %lld, node: %lld, data: %lld",
+	f2fs_info(sbi, "%s start, meta: %lld, node: %lld, data: %lld",
+		__func__,
 		get_pages(sbi, F2FS_DIRTY_META),
 		get_pages(sbi, F2FS_DIRTY_NODES),
 		get_pages(sbi, F2FS_DIRTY_DATA));
@@ -2649,18 +2652,25 @@ static void f2fs_enable_checkpoint(struct f2fs_sb_info *sbi)
 	/* we should flush all the data to keep data consistency */
 	while (get_pages(sbi, F2FS_DIRTY_DATA)) {
 		writeback_inodes_sb_nr(sbi->sb, nr_pages, WB_REASON_SYNC);
-		f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT);
+		f2fs_io_schedule_timeout(DEFAULT_SCHEDULE_TIMEOUT);
 		if (f2fs_time_over(sbi, ENABLE_TIME))
 			break;
 	}
 
 	writeback = ktime_get();
 
-	sync_inodes_sb(sbi->sb);
+	f2fs_down_write(&sbi->cp_enable_rwsem);
+
+	lock = ktime_get();
+
+	if (get_pages(sbi, F2FS_DIRTY_DATA))
+		sync_inodes_sb(sbi->sb);
 
 	if (unlikely(get_pages(sbi, F2FS_DIRTY_DATA)))
-		f2fs_warn(sbi, "checkpoint=enable has some unwritten data: %lld",
-			get_pages(sbi, F2FS_DIRTY_DATA));
+		f2fs_warn(sbi, "%s: has some unwritten data: %lld",
+			__func__, get_pages(sbi, F2FS_DIRTY_DATA));
+
+	sync_inode = ktime_get();
 
 	f2fs_down_write(&sbi->gc_lock);
 	f2fs_dirty_to_prefree(sbi);
@@ -2669,16 +2679,32 @@ static void f2fs_enable_checkpoint(struct f2fs_sb_info *sbi)
 	set_sbi_flag(sbi, SBI_IS_DIRTY);
 	f2fs_up_write(&sbi->gc_lock);
 
-	f2fs_sync_fs(sbi->sb, 1);
+	f2fs_info(sbi, "%s sync_fs, meta: %lld, imeta: %lld, node: %lld, dents: %lld, qdata: %lld",
+		__func__,
+		get_pages(sbi, F2FS_DIRTY_META),
+		get_pages(sbi, F2FS_DIRTY_IMETA),
+		get_pages(sbi, F2FS_DIRTY_NODES),
+		get_pages(sbi, F2FS_DIRTY_DENTS),
+		get_pages(sbi, F2FS_DIRTY_QDATA));
+	ret = f2fs_sync_fs(sbi->sb, 1);
+	if (ret)
+		f2fs_err(sbi, "%s sync_fs failed, ret: %d", __func__, ret);
 
 	/* Let's ensure there's no pending checkpoint anymore */
 	f2fs_flush_ckpt_thread(sbi);
 
+	f2fs_up_write(&sbi->cp_enable_rwsem);
+
 	end = ktime_get();
 
-	f2fs_info(sbi, "f2fs_enable_checkpoint() finishes, writeback:%llu, sync:%llu",
-		ktime_ms_delta(writeback, start),
-		ktime_ms_delta(end, writeback));
+	f2fs_info(sbi, "%s end, writeback:%llu, "
+		"lock:%llu, sync_inode:%llu, sync_fs:%llu",
+		__func__,
+		ktime_ms_delta(writeback, start),
+		ktime_ms_delta(lock, writeback),
+		ktime_ms_delta(sync_inode, lock),
+		ktime_ms_delta(end, sync_inode));
+	return ret;
 }
 
 static int __f2fs_remount(struct fs_context *fc, struct super_block *sb)
@@ -2892,7 +2918,9 @@
 				goto restore_discard;
 			need_enable_checkpoint = true;
 		} else {
-			f2fs_enable_checkpoint(sbi);
+			err = f2fs_enable_checkpoint(sbi);
+			if (err)
+				goto restore_discard;
 			need_disable_checkpoint = true;
 		}
 	}
@@ -2935,7 +2963,8 @@ skip:
 	return 0;
 restore_checkpoint:
 	if (need_enable_checkpoint) {
-		f2fs_enable_checkpoint(sbi);
+		if (f2fs_enable_checkpoint(sbi))
+			f2fs_warn(sbi, "checkpoint has not been enabled");
 	} else if (need_disable_checkpoint) {
 		if (f2fs_disable_checkpoint(sbi))
 			f2fs_warn(sbi, "checkpoint has not been disabled");
@@ -3110,7 +3139,7 @@ retry:
 						&folio, &fsdata);
 		if (unlikely(err)) {
 			if (err == -ENOMEM) {
-				f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT);
+				memalloc_retry_wait(GFP_NOFS);
 				goto retry;
 			}
 			set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
@@ -4051,6 +4080,20 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
 	if (sanity_check_area_boundary(sbi, folio, index))
 		return -EFSCORRUPTED;
 
+	/*
+	 * Check for legacy summary layout on 16KB+ block devices.
+	 * Modern f2fs-tools packs multiple 4KB summary areas into one block,
+	 * whereas legacy versions used one block per summary, leading
+	 * to a much larger SSA.
+	 */
+	if (SUMS_PER_BLOCK > 1 &&
+	    !(__F2FS_HAS_FEATURE(raw_super, F2FS_FEATURE_PACKED_SSA))) {
+		f2fs_info(sbi, "Error: Device formatted with a legacy version. "
+			"Please reformat with a tool supporting the packed ssa "
+			"feature for block sizes larger than 4kb.");
+		return -EOPNOTSUPP;
+	}
+
 	return 0;
 }
 
@@ -4544,50 +4587,9 @@ void f2fs_save_errors(struct f2fs_sb_info *sbi, unsigned char flag)
 	spin_unlock_irqrestore(&sbi->error_lock, flags);
 }
 
-static bool f2fs_update_errors(struct f2fs_sb_info *sbi)
-{
-	unsigned long flags;
-	bool need_update = false;
-
-	spin_lock_irqsave(&sbi->error_lock, flags);
-	if (sbi->error_dirty) {
-		memcpy(F2FS_RAW_SUPER(sbi)->s_errors, sbi->errors,
-							MAX_F2FS_ERRORS);
-		sbi->error_dirty = false;
-		need_update = true;
-	}
-	spin_unlock_irqrestore(&sbi->error_lock, flags);
-
-	return need_update;
-}
-
-static void f2fs_record_errors(struct f2fs_sb_info *sbi, unsigned char error)
-{
-	int err;
-
-	f2fs_down_write(&sbi->sb_lock);
-
-	if (!f2fs_update_errors(sbi))
-		goto out_unlock;
-
-	err = f2fs_commit_super(sbi, false);
-	if (err)
-		f2fs_err_ratelimited(sbi,
-			"f2fs_commit_super fails to record errors:%u, err:%d",
-			error, err);
-out_unlock:
-	f2fs_up_write(&sbi->sb_lock);
-}
-
 void f2fs_handle_error(struct f2fs_sb_info *sbi, unsigned char error)
 {
 	f2fs_save_errors(sbi, error);
-	f2fs_record_errors(sbi, error);
-}
-
-void f2fs_handle_error_async(struct f2fs_sb_info *sbi, unsigned char error)
-{
-	f2fs_save_errors(sbi, error);
 
 	if (!sbi->error_dirty)
 		return;
@@ -4904,6 +4906,7 @@ try_onemore:
 	init_f2fs_rwsem(&sbi->node_change);
 	spin_lock_init(&sbi->stat_lock);
 	init_f2fs_rwsem(&sbi->cp_rwsem);
+	init_f2fs_rwsem(&sbi->cp_enable_rwsem);
 	init_f2fs_rwsem(&sbi->quota_sem);
 	init_waitqueue_head(&sbi->cp_wait);
 	spin_lock_init(&sbi->error_lock);
@@ -5015,13 +5018,9 @@ try_onemore:
 	if (err)
 		goto free_iostat;
 
-	/* init per sbi slab cache */
-	err = f2fs_init_xattr_caches(sbi);
-	if (err)
-		goto free_percpu;
 	err = f2fs_init_page_array_cache(sbi);
 	if (err)
-		goto free_xattr_cache;
+		goto free_percpu;
 
 	/* get an inode for meta space */
 	sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
@@ -5226,11 +5225,15 @@ try_onemore:
 		}
 	} else {
 		err = f2fs_recover_fsync_data(sbi, true);
-
-		if (!f2fs_readonly(sb) && err > 0) {
-			err = -EINVAL;
-			f2fs_err(sbi, "Need to recover fsync data");
-			goto free_meta;
+		if (err > 0) {
+			if (!f2fs_readonly(sb)) {
+				f2fs_err(sbi, "Need to recover fsync data");
+				err = -EINVAL;
+				goto free_meta;
+			} else {
+				f2fs_info(sbi, "drop all fsynced data");
+				err = 0;
+			}
 		}
 	}
 
@@ -5257,13 +5260,12 @@ reset_checkpoint:
 	if (err)
 		goto sync_free_meta;
 
-	if (test_opt(sbi, DISABLE_CHECKPOINT)) {
+	if (test_opt(sbi, DISABLE_CHECKPOINT))
 		err = f2fs_disable_checkpoint(sbi);
-		if (err)
-			goto sync_free_meta;
-	} else if (is_set_ckpt_flags(sbi, CP_DISABLED_FLAG)) {
-		f2fs_enable_checkpoint(sbi);
-	}
+	else if (is_set_ckpt_flags(sbi, CP_DISABLED_FLAG))
+		err = f2fs_enable_checkpoint(sbi);
+	if (err)
+		goto sync_free_meta;
 
 	/*
 	 * If filesystem is not mounted as read-only then
@@ -5350,8 +5352,6 @@ free_meta_inode:
 	sbi->meta_inode = NULL;
 free_page_array_cache:
 	f2fs_destroy_page_array_cache(sbi);
-free_xattr_cache:
-	f2fs_destroy_xattr_caches(sbi);
 free_percpu:
 	destroy_percpu_info(sbi);
 free_iostat:
@@ -5554,10 +5554,15 @@ static int __init init_f2fs_fs(void)
 	err = f2fs_create_casefold_cache();
 	if (err)
 		goto free_compress_cache;
-	err = register_filesystem(&f2fs_fs_type);
+	err = f2fs_init_xattr_cache();
 	if (err)
 		goto free_casefold_cache;
+	err = register_filesystem(&f2fs_fs_type);
+	if (err)
+		goto free_xattr_cache;
 	return 0;
+free_xattr_cache:
+	f2fs_destroy_xattr_cache();
 free_casefold_cache:
 	f2fs_destroy_casefold_cache();
 free_compress_cache:
@@ -5598,6 +5603,7 @@ fail:
 static void __exit exit_f2fs_fs(void)
 {
 	unregister_filesystem(&f2fs_fs_type);
+	f2fs_destroy_xattr_cache();
 	f2fs_destroy_casefold_cache();
 	f2fs_destroy_compress_cache();
 	f2fs_destroy_compress_mempool();
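Note on the opt_mask rework at the top of this diff: ctx_set_opt(), ctx_clear_opt() and ctx_test_opt() now take an enum f2fs_mount_opt index and turn it into a mask with BIT(), which is why opt_mask is widened to unsigned long long (room for more than 32 mount options). The snippet below is a minimal user-space sketch of that bookkeeping, not the kernel code: the BIT() definition, the trimmed-down context structure and the enum values shown here are illustrative assumptions.

/* sketch.c - illustrative only; mirrors the helpers shown in the diff above */
#include <stdio.h>
#include <stdbool.h>

#define BIT(nr)		(1ULL << (nr))	/* assumed stand-in for the kernel macro */

enum f2fs_mount_opt {			/* assumed subset; the real values live in f2fs.h */
	F2FS_MOUNT_DISCARD,
	F2FS_MOUNT_RESERVE_ROOT,
	F2FS_MOUNT_COMPRESS_CACHE,
};

struct f2fs_fs_context {
	unsigned long long opt;		/* stand-in for info.opt */
	unsigned long long opt_mask;	/* bits the user actually changed */
};

static void ctx_set_opt(struct f2fs_fs_context *ctx, enum f2fs_mount_opt flag)
{
	ctx->opt |= BIT(flag);
	ctx->opt_mask |= BIT(flag);
}

static void ctx_clear_opt(struct f2fs_fs_context *ctx, enum f2fs_mount_opt flag)
{
	ctx->opt &= ~BIT(flag);
	ctx->opt_mask |= BIT(flag);
}

static bool ctx_test_opt(struct f2fs_fs_context *ctx, enum f2fs_mount_opt flag)
{
	return ctx->opt & BIT(flag);
}

int main(void)
{
	struct f2fs_fs_context ctx = { 0 };

	ctx_set_opt(&ctx, F2FS_MOUNT_DISCARD);		/* user enabled the option */
	ctx_clear_opt(&ctx, F2FS_MOUNT_COMPRESS_CACHE);	/* user turned the option off */

	/* opt_mask remembers both options as "touched"; opt keeps only discard */
	printf("opt=%#llx opt_mask=%#llx discard=%d\n",
	       ctx.opt, ctx.opt_mask, ctx_test_opt(&ctx, F2FS_MOUNT_DISCARD));
	return 0;
}

Keeping opt_mask separate from opt is what lets f2fs_check_opt_consistency() above distinguish "the user explicitly passed this option" (bit set in opt_mask) from "the option happens to be enabled" (ctx_test_opt()), for example when it drops discard on devices without discard support or preserves a previous reserve_root across a remount.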
